path: root/core/src/main/java
author     Ryan Ernst <ryan@iernst.net>  2015-12-18 12:24:30 -0800
committer  Ryan Ernst <ryan@iernst.net>  2015-12-18 12:24:30 -0800
commit     853e9c0fd10af51e66f5b9d63c5e6b248968c15e (patch)
tree       b39f0a1723d582e857181d0cf69c3be23411e544 /core/src/main/java
parent     0f518e1b07059293b7ab94c5494ff86f531053a4 (diff)
parent     10dfa32f9ddc2216be2aabb88462a32ce5900333 (diff)
Merge branch 'master' into wildcard_imports
Diffstat (limited to 'core/src/main/java')
-rw-r--r--  core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java  1
-rw-r--r--  core/src/main/java/org/elasticsearch/Version.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/action/ActionModule.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java  127
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java  84
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java (renamed from core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java)  34
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java  64
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java (renamed from core/src/main/java/org/elasticsearch/rest/RestModule.java)  36
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java (renamed from core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java)  97
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java  52
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java  55
-rw-r--r--  core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java  203
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java  108
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java  166
-rw-r--r--  core/src/main/java/org/elasticsearch/action/bulk/Retry.java  237
-rw-r--r--  core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/action/search/SearchResponse.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java  31
-rw-r--r--  core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java  30
-rw-r--r--  core/src/main/java/org/elasticsearch/client/Requests.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/client/support/AbstractClient.java  19
-rw-r--r--  core/src/main/java/org/elasticsearch/client/transport/TransportClient.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterModule.java  87
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterState.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java  54
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java  206
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java  32
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java  45
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java  29
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java  51
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java  56
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java  62
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java  27
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java  128
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java  60
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java  62
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java  30
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java  48
-rw-r--r--  core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java  102
-rw-r--r--  core/src/main/java/org/elasticsearch/common/Booleans.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/common/Randomness.java  120
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java  33
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java  49
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java  41
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java  45
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java  71
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java  1
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java  75
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java  68
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java  45
-rw-r--r--  core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/common/network/NetworkModule.java  345
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java  252
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java  141
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/Setting.java  461
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/Settings.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java  27
-rw-r--r--  core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/common/text/BytesText.java  82
-rw-r--r--  core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java  111
-rw-r--r--  core/src/main/java/org/elasticsearch/common/text/StringText.java  94
-rw-r--r--  core/src/main/java/org/elasticsearch/common/text/Text.java  96
-rw-r--r--  core/src/main/java/org/elasticsearch/common/unit/TimeValue.java  24
-rw-r--r--  core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java  93
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java  57
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java  30
-rw-r--r--  core/src/main/java/org/elasticsearch/env/NodeEnvironment.java  38
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/GatewayService.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java  198
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java  23
-rw-r--r--  core/src/main/java/org/elasticsearch/http/HttpServerModule.java  59
-rw-r--r--  core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/index/IndexService.java  35
-rw-r--r--  core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java  29
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java  22
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java  18
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java  146
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/Mapper.java  4
-rwxr-xr-x  core/src/main/java/org/elasticsearch/index/mapper/MapperService.java  174
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java  46
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/Mapping.java  39
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java  81
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java  54
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java  50
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java  15
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java  31
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java  45
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java  32
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java  17
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java  182
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java  17
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java  51
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java  151
-rw-r--r--  core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java  50
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java  78
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java  234
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java  88
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java  21
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/IndexShard.java  37
-rw-r--r--  core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java  39
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java  54
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/Translog.java  102
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java  63
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndicesModule.java  80
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/IndicesService.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java  129
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java  56
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java  86
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java  138
-rw-r--r--  core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java  26
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java  42
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java (renamed from core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java)  25
-rw-r--r--  core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/node/Node.java  64
-rw-r--r--  core/src/main/java/org/elasticsearch/node/NodeModule.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java  122
-rw-r--r--  core/src/main/java/org/elasticsearch/percolator/PercolateContext.java  13
-rw-r--r--  core/src/main/java/org/elasticsearch/percolator/PercolatorService.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java (renamed from core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java)  24
-rw-r--r--  core/src/main/java/org/elasticsearch/plugins/PluginManager.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java  273
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java  42
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java  17
-rw-r--r--  core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/script/ScriptEngineService.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/script/ScriptService.java  78
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchService.java  55
-rw-r--r--  core/src/main/java/org/elasticsearch/search/SearchShardTarget.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java  28
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java  25
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java  70
-rw-r--r--  core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java  16
-rw-r--r--  core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java  5
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java  132
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java  2
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java  294
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java  4
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java  57
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java  14
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java  9
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java  44
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/SearchContext.java  8
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java  12
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java  11
-rw-r--r--  core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java  156
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java  135
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java  108
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java  235
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java  113
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java  94
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java  165
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java  158
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java  103
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java  97
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/Profiler.java  130
-rw-r--r--  core/src/main/java/org/elasticsearch/search/profile/Profilers.java  59
-rw-r--r--  core/src/main/java/org/elasticsearch/search/query/QueryPhase.java  68
-rw-r--r--  core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java  41
-rw-r--r--  core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java  3
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java  6
-rw-r--r--  core/src/main/java/org/elasticsearch/snapshots/RestoreService.java  32
-rw-r--r--  core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java  77
-rw-r--r--  core/src/main/java/org/elasticsearch/transport/Transport.java  7
-rw-r--r--  core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java  76
-rw-r--r--  core/src/main/java/org/elasticsearch/transport/TransportModule.java  122
-rw-r--r--  core/src/main/java/org/elasticsearch/transport/TransportService.java  51
-rw-r--r--  core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java  10
-rw-r--r--  core/src/main/java/org/elasticsearch/tribe/TribeService.java  272
249 files changed, 7797 insertions, 4528 deletions
diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
index fce58d2f88..9f2b1b6622 100644
--- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
+++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
@@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser {
static {
Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>();
fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension());
- fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension());
FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions);
}
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index a5e2e38ca2..b8ba0a411a 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -268,11 +268,15 @@ public class Version {
public static final int V_2_0_1_ID = 2000199;
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_2_ID = 2000299;
- public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
+ public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
+ public static final int V_2_0_3_ID = 2000399;
+ public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_1_0_ID = 2010099;
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_1_ID = 2010199;
- public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+ public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+ public static final int V_2_1_2_ID = 2010299;
+ public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final int V_3_0_0_ID = 3000099;
@@ -293,10 +297,14 @@ public class Version {
return V_3_0_0;
case V_2_2_0_ID:
return V_2_2_0;
+ case V_2_1_2_ID:
+ return V_2_1_2;
case V_2_1_1_ID:
return V_2_1_1;
case V_2_1_0_ID:
return V_2_1_0;
+ case V_2_0_3_ID:
+ return V_2_0_3;
case V_2_0_2_ID:
return V_2_0_2;
case V_2_0_1_ID:
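The ids above follow a positional encoding: major * 1,000,000 + minor * 10,000 + revision * 100 + build, with build 99 denoting a released version, so V_2_0_3_ID = 2000399 decodes to 2.0.3 and V_2_1_2_ID = 2010299 to 2.1.2. A minimal standalone sketch of that decoding (the class and method names below are illustrative, not part of this commit):

public final class VersionIdSketch {
    // Decodes an internal version id, e.g. 2000399 -> "2.0.3", assuming the
    // major/minor/revision/build scheme implied by the constants above.
    static String describe(int id) {
        int major = id / 1_000_000;      // 2000399 -> 2
        int minor = (id / 10_000) % 100; // 2000399 -> 0
        int revision = (id / 100) % 100; // 2000399 -> 3
        int build = id % 100;            // 99 marks a release build
        return major + "." + minor + "." + revision + (build == 99 ? "" : "-build" + build);
    }

    public static void main(String[] args) {
        System.out.println(describe(2000399)); // prints 2.0.3
        System.out.println(describe(2010299)); // prints 2.1.2
    }
}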
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index 88ccb80971..adcb873e83 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
@@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule {
registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
+ registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java
new file mode 100644
index 0000000000..f5020a46b3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.ClusterState.builder;
+
+/**
+ * Updates transient and persistent cluster state settings if there are any changes
+ * due to the update.
+ */
+final class SettingsUpdater {
+ final Settings.Builder transientUpdates = Settings.settingsBuilder();
+ final Settings.Builder persistentUpdates = Settings.settingsBuilder();
+ private final ClusterSettings clusterSettings;
+
+ SettingsUpdater(ClusterSettings clusterSettings) {
+ this.clusterSettings = clusterSettings;
+ }
+
+ synchronized Settings getTransientUpdates() {
+ return transientUpdates.build();
+ }
+
+ synchronized Settings getPersistentUpdate() {
+ return persistentUpdates.build();
+ }
+
+ synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) {
+ boolean changed = false;
+ Settings.Builder transientSettings = Settings.settingsBuilder();
+ transientSettings.put(currentState.metaData().transientSettings());
+ changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
+
+ Settings.Builder persistentSettings = Settings.settingsBuilder();
+ persistentSettings.put(currentState.metaData().persistentSettings());
+ changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
+
+ if (!changed) {
+ return currentState;
+ }
+
+ MetaData.Builder metaData = MetaData.builder(currentState.metaData())
+ .persistentSettings(persistentSettings.build())
+ .transientSettings(transientSettings.build());
+
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
+ if (updatedReadOnly) {
+ blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+ } else {
+ blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+ }
+ ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
+ Settings settings = build.metaData().settings();
+ // now we try to apply things and if they are invalid we fail
+ // this dryRun will validate & parse settings but won't actually apply them.
+ clusterSettings.dryRun(settings);
+ return build;
+ }
+
+ private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
+ boolean changed = false;
+ final Set<String> toRemove = new HashSet<>();
+ Settings.Builder settingsBuilder = Settings.settingsBuilder();
+ for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
+ if (entry.getValue() == null) {
+ toRemove.add(entry.getKey());
+ } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
+ settingsBuilder.put(entry.getKey(), entry.getValue());
+ updates.put(entry.getKey(), entry.getValue());
+ changed = true;
+ } else {
+ throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
+ }
+
+ }
+ changed |= applyDeletes(toRemove, target);
+ target.put(settingsBuilder.build());
+ return changed;
+ }
+
+ private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
+ boolean changed = false;
+ for (String entry : deletes) {
+ Set<String> keysToRemove = new HashSet<>();
+ Set<String> keySet = builder.internalMap().keySet();
+ for (String key : keySet) {
+ if (Regex.simpleMatch(entry, key)) {
+ keysToRemove.add(key);
+ }
+ }
+ for (String key : keysToRemove) {
+ builder.remove(key);
+ changed = true;
+ }
+ }
+ return changed;
+ }
+}
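The class above consolidates logic that TransportClusterUpdateSettingsAction previously performed inline (removed in the next hunk): it merges the requested transient and persistent updates into the current state, treats null values as wildcard deletes, rejects keys that are neither dynamic nor logger settings, and dry-runs the merged result so that invalid values fail the whole update. A rough usage sketch, assuming a ClusterSettings instance and a current ClusterState are in scope (variable names are illustrative):

SettingsUpdater updater = new SettingsUpdater(clusterSettings);
Settings transientPart = Settings.settingsBuilder()
        .put("cluster.routing.allocation.enable", "none") // must be a registered dynamic setting
        .build();
ClusterState newState = updater.updateSettings(currentState, transientPart, Settings.EMPTY);
boolean changed = newState != currentState;       // same reference means nothing was applied
Settings applied = updater.getTransientUpdates(); // the subset that actually took effect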
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
index 73d14a2bb1..99815b77ff 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -28,23 +28,19 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
-import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
-import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-import java.util.Map;
-
import static org.elasticsearch.cluster.ClusterState.builder;
/**
@@ -54,15 +50,14 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
private final AllocationService allocationService;
- private final DynamicSettings dynamicSettings;
+ private final ClusterSettings clusterSettings;
@Inject
public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
- AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings,
- ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+ AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) {
super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new);
this.allocationService = allocationService;
- this.dynamicSettings = dynamicSettings;
+ this.clusterSettings = clusterSettings;
}
@Override
@@ -73,8 +68,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) {
// allow for dedicated changes to the metadata blocks, so we don't block those, in order to allow "re-enabling" them
- if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && request.persistentSettings().get(MetaData.SETTING_READ_ONLY) != null) ||
- request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) {
+ if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) ||
+ request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) {
return null;
}
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
@@ -88,9 +83,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) {
- final Settings.Builder transientUpdates = Settings.settingsBuilder();
- final Settings.Builder persistentUpdates = Settings.settingsBuilder();
-
+ final SettingsUpdater updater = new SettingsUpdater(clusterSettings);
clusterService.submitStateUpdateTask("cluster_update_settings",
new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {
@@ -98,7 +91,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
- return new ClusterUpdateSettingsResponse(acknowledged, transientUpdates.build(), persistentUpdates.build());
+ return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
}
@Override
@@ -125,7 +118,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
// so we should *not* execute the reroute.
if (!clusterService.state().nodes().localNodeMaster()) {
logger.debug("Skipping reroute after cluster update settings, because node is no longer master");
- listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+ listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
return;
}
@@ -145,13 +138,13 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
//we return when the cluster reroute is acked or it times out, but the acknowledged flag depends on whether the settings update was acknowledged
protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
- return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, transientUpdates.build(), persistentUpdates.build());
+ return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
}
@Override
public void onNoLongerMaster(String source) {
logger.debug("failed to preform reroute after cluster settings were updated - current node is no longer a master");
- listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+ listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
}
@Override
@@ -181,58 +174,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
@Override
public ClusterState execute(final ClusterState currentState) {
- Settings.Builder transientSettings = Settings.settingsBuilder();
- transientSettings.put(currentState.metaData().transientSettings());
- for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
- if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
- String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
- if (error == null) {
- transientSettings.put(entry.getKey(), entry.getValue());
- transientUpdates.put(entry.getKey(), entry.getValue());
- changed = true;
- } else {
- logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error);
- }
- } else {
- logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey());
- }
- }
-
- Settings.Builder persistentSettings = Settings.settingsBuilder();
- persistentSettings.put(currentState.metaData().persistentSettings());
- for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
- if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
- String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
- if (error == null) {
- persistentSettings.put(entry.getKey(), entry.getValue());
- persistentUpdates.put(entry.getKey(), entry.getValue());
- changed = true;
- } else {
- logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
- }
- } else {
- logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
- }
- }
-
- if (!changed) {
- return currentState;
- }
-
- MetaData.Builder metaData = MetaData.builder(currentState.metaData())
- .persistentSettings(persistentSettings.build())
- .transientSettings(transientSettings.build());
-
- ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
- boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false);
- if (updatedReadOnly) {
- blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
- } else {
- blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
- }
-
- return builder(currentState).metaData(metaData).blocks(blocks).build();
+ ClusterState clusterState = updater.updateSettings(currentState, request.transientSettings(), request.persistentSettings());
+ changed = clusterState != currentState;
+ return clusterState;
}
});
}
+
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java
index 5fe8297a6b..f2bfb18c43 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java
@@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
+import java.util.Collections;
+
public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> {
private final ScriptService scriptService;
@@ -55,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<
@Override
protected void doRun() throws Exception {
- ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request);
+ ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
BytesReference processedTemplate = (BytesReference) executable.run();
RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
response.source(processedTemplate);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
index 2c25ee34f1..e454fcabc7 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
@@ -31,31 +31,36 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
/**
* Close index action
*/
-public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> implements NodeSettingsService.Listener {
+public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> {
private final MetaDataIndexStateService indexStateService;
private final DestructiveOperations destructiveOperations;
private volatile boolean closeIndexEnabled;
- public static final String SETTING_CLUSTER_INDICES_CLOSE_ENABLE = "cluster.indices.close.enable";
+ public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);
@Inject
public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataIndexStateService indexStateService,
- NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
+ ClusterSettings clusterSettings, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new);
this.indexStateService = indexStateService;
this.destructiveOperations = destructiveOperations;
- this.closeIndexEnabled = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, true);
- nodeSettingsService.addListener(this);
+ this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_INDICES_CLOSE_ENABLE_SETTING, this::setCloseIndexEnabled);
+ }
+
+ private void setCloseIndexEnabled(boolean closeIndexEnabled) {
+ this.closeIndexEnabled = closeIndexEnabled;
}
@Override
@@ -73,7 +78,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
destructiveOperations.failDestructive(request.indices());
if (closeIndexEnabled == false) {
- throw new IllegalStateException("closing indices is disabled - set [" + SETTING_CLUSTER_INDICES_CLOSE_ENABLE + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
+ throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
}
super.doExecute(request, listener);
}
@@ -104,13 +109,4 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
}
});
}
-
- @Override
- public void onRefreshSettings(Settings settings) {
- final boolean enable = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled);
- if (enable != this.closeIndexEnabled) {
- logger.info("updating [{}] from [{}] to [{}]", SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled, enable);
- this.closeIndexEnabled = enable;
- }
- }
}
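This file is representative of the broader migration in this merge: a raw settings key plus a NodeSettingsService.Listener gives way to a typed Setting constant (key, default, dynamic flag, scope) with a consumer registered on ClusterSettings. A condensed sketch of the new pattern for an arbitrary component, assuming access to the node Settings and ClusterSettings (the component and setting names are made up for illustration):

public static final Setting<Boolean> MY_FLAG_SETTING =
        Setting.boolSetting("my.component.flag", true, true, Setting.Scope.CLUSTER);

private volatile boolean flag;

MyComponent(Settings settings, ClusterSettings clusterSettings) {
    this.flag = MY_FLAG_SETTING.get(settings); // initial value, default applied if unset
    // invoked whenever the cluster-level value changes dynamically
    clusterSettings.addSettingsUpdateConsumer(MY_FLAG_SETTING, v -> this.flag = v);
}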
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
index c02e2ade2a..82176da053 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
@@ -31,7 +31,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -45,8 +44,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Inject
public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
- ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService,
- NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
+ ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexRequest::new);
this.deleteIndexService = deleteIndexService;
diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java
index f9fc8c9d5d..291fd49c63 100644
--- a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java
@@ -17,26 +17,28 @@
* under the License.
*/
-package org.apache.lucene.queryparser.classic;
+package org.elasticsearch.action.admin.indices.flush;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.Query;
-import org.elasticsearch.index.query.MissingQueryBuilder;
-import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.action.Action;
+import org.elasticsearch.client.ElasticsearchClient;
-/**
- *
- */
-public class MissingFieldQueryExtension implements FieldQueryExtension {
- public static final String NAME = "_missing_";
+public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
+
+ public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
+ public static final String NAME = "indices:admin/synced_flush";
+
+ private SyncedFlushAction() {
+ super(NAME);
+ }
+
+ @Override
+ public SyncedFlushResponse newResponse() {
+ return new SyncedFlushResponse();
+ }
@Override
- public Query query(QueryShardContext context, String queryText) {
- Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE);
- if (query != null) {
- return new ConstantScoreQuery(query);
- }
- return null;
+ public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+ return new SyncedFlushRequestBuilder(client, this);
}
}
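SyncedFlushAction follows the usual Action-singleton contract: a unique transport name plus factories for the response and the request builder. Once registered in ActionModule (see the registerAction hunk earlier in this diff), it can be dispatched through the generic client entry point; a hedged one-liner, assuming a connected client instance:

SyncedFlushResponse resp =
        client.execute(SyncedFlushAction.INSTANCE, new SyncedFlushRequest("my-index")).actionGet();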
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java
new file mode 100644
index 0000000000..59719fe887
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.support.broadcast.BroadcastRequest;
+
+import java.util.Arrays;
+
+/**
+ * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
+ * and writes the same sync id to primary and all copies.
+ *
+ * <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p>
+ *
+ * @see org.elasticsearch.client.Requests#flushRequest(String...)
+ * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
+ * @see SyncedFlushResponse
+ */
+public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {
+
+ public SyncedFlushRequest() {
+ }
+
+ /**
+ * Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
+ * The new request will inherit the headers and context from the original request that caused it.
+ */
+ public SyncedFlushRequest(ActionRequest originalRequest) {
+ super(originalRequest);
+ }
+
+ /**
+ * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
+ * be sync flushed.
+ */
+ public SyncedFlushRequest(String... indices) {
+ super(indices);
+ }
+
+
+ @Override
+ public String toString() {
+ return "SyncedFlushRequest{" +
+ "indices=" + Arrays.toString(indices) + "}";
+ }
+}
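Per the javadoc above, Requests#syncedFlushRequest is the preferred factory (Requests.java and IndicesAdminClient.java both change in this commit, per the diffstat). A short asynchronous usage sketch based on those javadoc references, with handler bodies elided:

SyncedFlushRequest request = Requests.syncedFlushRequest("logs-2015.12.18");
client.admin().indices().syncedFlush(request, new ActionListener<SyncedFlushResponse>() {
    @Override
    public void onResponse(SyncedFlushResponse response) {
        // e.g. compare response.successfulShards() against response.totalShards()
    }
    @Override
    public void onFailure(Throwable e) {
        // handle the failure
    }
});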
diff --git a/core/src/main/java/org/elasticsearch/rest/RestModule.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java
index e7949172d0..9e40726081 100644
--- a/core/src/main/java/org/elasticsearch/rest/RestModule.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java
@@ -17,35 +17,25 @@
* under the License.
*/
-package org.elasticsearch.rest;
+package org.elasticsearch.action.admin.indices.flush;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.rest.action.RestActionModule;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.ElasticsearchClient;
-import java.util.ArrayList;
-import java.util.List;
+public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
-/**
- *
- */
-public class RestModule extends AbstractModule {
-
- private final Settings settings;
- private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>();
-
- public void addRestAction(Class<? extends BaseRestHandler> restAction) {
- restPluginsActions.add(restAction);
+ public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) {
+ super(client, action, new SyncedFlushRequest());
}
- public RestModule(Settings settings) {
- this.settings = settings;
+ public SyncedFlushRequestBuilder setIndices(String[] indices) {
+ super.request().indices(indices);
+ return this;
}
-
- @Override
- protected void configure() {
- bind(RestController.class).asEagerSingleton();
- new RestActionModule(restPluginsActions).configure(binder());
+ public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
+ super.request().indicesOptions(indicesOptions);
+ return this;
}
}
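Because the builder extends ActionRequestBuilder, it inherits the standard execute()/get() plumbing; a minimal synchronous sketch (the index pattern and options are arbitrary examples):

SyncedFlushResponse resp = new SyncedFlushRequestBuilder(client, SyncedFlushAction.INSTANCE)
        .setIndices(new String[]{"logs-*"})
        .setIndicesOptions(IndicesOptions.lenientExpandOpen())
        .get();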
diff --git a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java
index 435c0d138c..5925370e5f 100644
--- a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java
@@ -16,16 +16,25 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.indices.flush;
+package org.elasticsearch.action.admin.indices.flush;
+import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
+import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap;
/**
* The result of performing a sync flush operation on all shards of multiple indices
*/
-public class IndicesSyncedFlushResult implements ToXContent {
+public class SyncedFlushResponse extends ActionResponse implements ToXContent {
- final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
- final ShardCounts shardCounts;
+ Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
+ ShardCounts shardCounts;
+ SyncedFlushResponse() {
- public IndicesSyncedFlushResult(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
+ }
+
+ public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
// shardsResultPerIndex is never modified after it is passed to this
// constructor so this is safe even though shardsResultPerIndex is a
// ConcurrentHashMap
@@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
}
- /** total number shards, including replicas, both assigned and unassigned */
+ /**
+ * total number of shards, including replicas, both assigned and unassigned
+ */
public int totalShards() {
return shardCounts.total;
}
- /** total number of shards for which the operation failed */
+ /**
+ * total number of shards for which the operation failed
+ */
public int failedShards() {
return shardCounts.failed;
}
- /** total number of shards which were successfully sync-flushed */
+ /**
+ * total number of shards which were successfully sync-flushed
+ */
public int successfulShards() {
return shardCounts.successful;
}
@@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.endObject();
continue;
}
- Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards = shardResults.failedShards();
- for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardEntry : failedShards.entrySet()) {
+ Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
+ for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
builder.startObject();
builder.field(Fields.SHARD, shardResults.shardId().id());
builder.field(Fields.REASON, shardEntry.getValue().failureReason());
@@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent {
return new ShardCounts(total, successful, failed);
}
- static final class ShardCounts implements ToXContent {
+ static final class ShardCounts implements ToXContent, Streamable {
- public final int total;
- public final int successful;
- public final int failed;
+ public int total;
+ public int successful;
+ public int failed;
ShardCounts(int total, int successful, int failed) {
this.total = total;
@@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.failed = failed;
}
+ ShardCounts() {
+
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOTAL, total);
@@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.field(Fields.FAILED, failed);
return builder;
}
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ total = in.readInt();
+ successful = in.readInt();
+ failed = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeInt(total);
+ out.writeInt(successful);
+ out.writeInt(failed);
+ }
}
static final class Fields {
@@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent {
static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
static final XContentBuilderString REASON = new XContentBuilderString("reason");
}
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ shardCounts = new ShardCounts();
+ shardCounts.readFrom(in);
+ Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
+ int numShardsResults = in.readInt();
+ for (int i = 0; i < numShardsResults; i++) {
+ String index = in.readString();
+ List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
+ int numShards = in.readInt();
+ for (int j = 0; j < numShards; j++) {
+ shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in));
+ }
+ tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
+ }
+ shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ shardCounts.writeTo(out);
+ out.writeInt(shardsResultPerIndex.size());
+ for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeInt(entry.getValue().size());
+ for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
+ shardsSyncedFlushResult.writeTo(out);
+ }
+ }
+ }
}
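Once deserialized, the response exposes the aggregated shard counts shown above; a small sketch of reading them (the response variable is assumed):

    // Inspect the aggregated result of the synced flush.
    int total = response.totalShards();
    int ok = response.successfulShards();
    int failed = response.failedShards();
    if (failed > 0) {
        // per-shard failure reasons are emitted via toXContent(...)
    }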
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java
new file mode 100644
index 0000000000..3ba354f462
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.indices.flush;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.flush.SyncedFlushService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+/**
+ * Synced flush action.
+ */
+public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {
+
+ SyncedFlushService syncedFlushService;
+
+ @Inject
+ public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool,
+ TransportService transportService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver,
+ SyncedFlushService syncedFlushService) {
+ super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new);
+ this.syncedFlushService = syncedFlushService;
+ }
+
+ @Override
+ protected void doExecute(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
+ syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
index cab1047cac..2717a2320e 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
@@ -32,12 +32,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-import java.util.Arrays;
-
/**
* Open index action
*/
@@ -49,7 +46,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Inject
public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataIndexStateService indexStateService,
- NodeSettingsService nodeSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
+ ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
DestructiveOperations destructiveOperations) {
super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new);
this.indexStateService = indexStateService;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
index 84b39d4c68..380f6e0089 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
@@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
private DiscoveryNode node;
private long version;
+ private String allocationId;
private Throwable storeException;
- private Allocation allocation;
+ private AllocationStatus allocationStatus;
/**
* The status of the shard store with respect to the cluster
*/
- public enum Allocation {
+ public enum AllocationStatus {
/**
* Allocated as primary
@@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
private final byte id;
- Allocation(byte id) {
+ AllocationStatus(byte id) {
this.id = id;
}
- private static Allocation fromId(byte id) {
+ private static AllocationStatus fromId(byte id) {
switch (id) {
case 0: return PRIMARY;
case 1: return REPLICA;
case 2: return UNUSED;
- default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
+ default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
}
}
@@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
case 0: return "primary";
case 1: return "replica";
case 2: return "unused";
- default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
+ default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
}
}
- private static Allocation readFrom(StreamInput in) throws IOException {
+ private static AllocationStatus readFrom(StreamInput in) throws IOException {
return fromId(in.readByte());
}
@@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
private StoreStatus() {
}
- public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) {
+ public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
this.node = node;
this.version = version;
- this.allocation = allocation;
+ this.allocationId = allocationId;
+ this.allocationStatus = allocationStatus;
this.storeException = storeException;
}
@@ -130,14 +132,21 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
}
/**
- * Version of the store, used to select the store that will be
- * used as a primary.
+ * Version of the store
*/
public long getVersion() {
return version;
}
/**
+ * Allocation id of the store, used to select the store that will be
+ * used as a primary.
+ */
+ public String getAllocationId() {
+ return allocationId;
+ }
+
+ /**
* Exception while trying to open the
* shard index or from when the shard failed
*/
@@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
}
/**
- * The allocation status of the store.
- * {@link Allocation#PRIMARY} indicates a primary shard copy
- * {@link Allocation#REPLICA} indicates a replica shard copy
- * {@link Allocation#UNUSED} indicates an unused shard copy
+ * The allocation status of the store.
+ * {@link AllocationStatus#PRIMARY} indicates a primary shard copy
+ * {@link AllocationStatus#REPLICA} indicates a replica shard copy
+ * {@link AllocationStatus#UNUSED} indicates an unused shard copy
*/
- public Allocation getAllocation() {
- return allocation;
+ public AllocationStatus getAllocationStatus() {
+ return allocationStatus;
}
static StoreStatus readStoreStatus(StreamInput in) throws IOException {
@@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public void readFrom(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in);
version = in.readLong();
- allocation = Allocation.readFrom(in);
+ allocationId = in.readOptionalString();
+ allocationStatus = AllocationStatus.readFrom(in);
if (in.readBoolean()) {
storeException = in.readThrowable();
}
@@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out);
out.writeLong(version);
- allocation.writeTo(out);
+ out.writeOptionalString(allocationId);
+ allocationStatus.writeTo(out);
if (storeException != null) {
out.writeBoolean(true);
out.writeThrowable(storeException);
@@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
node.toXContent(builder, params);
builder.field(Fields.VERSION, version);
- builder.field(Fields.ALLOCATED, allocation.value());
+ builder.field(Fields.ALLOCATION_ID, allocationId);
+ builder.field(Fields.ALLOCATED, allocationStatus.value());
if (storeException != null) {
builder.startObject(Fields.STORE_EXCEPTION);
ElasticsearchException.toXContent(builder, params, storeException);
@@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
} else {
int compare = Long.compare(other.version, version);
if (compare == 0) {
- return Integer.compare(allocation.id, other.allocation.id);
+ return Integer.compare(allocationStatus.id, other.allocationStatus.id);
}
return compare;
}
@@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
static final XContentBuilderString STORES = new XContentBuilderString("stores");
// StoreStatus fields
static final XContentBuilderString VERSION = new XContentBuilderString("version");
+ static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
index 336ebc254b..d345c0e7d4 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
@@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
}
for (NodeGatewayStartedShards response : fetchResponse.responses) {
if (shardExistsInNode(response)) {
- IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
- storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException()));
+ IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
+ storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
}
}
CollectionUtil.timSort(storeStatuses);
@@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
}
- private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) {
+ private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
for (ShardRouting shardRouting : routingNodes.node(node.id())) {
ShardId shardId = shardRouting.shardId();
if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
if (shardRouting.primary()) {
- return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY;
+ return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
} else if (shardRouting.assignedToNode()) {
- return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA;
+ return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA;
} else {
- return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
+ return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
}
}
}
- return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
+ return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
}
/**
* A shard exists/existed in a node only if shard state file exists in the node
*/
private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
- return response.storeException() != null || response.version() != -1;
+ return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java
new file mode 100644
index 0000000000..a0ccca0fb5
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal
+ * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally.
+ *
+ * Notes for implementing custom subclasses:
+ *
+ * The underlying mathematical principle of <code>BackoffPolicy</code> is a progression, which can be either finite or infinite, although
+ * the latter should not be used for retrying. A progression can be mapped to a <code>java.util.Iterator</code> with the following
+ * semantics:
+ *
+ * <ul>
+ * <li><code>#hasNext()</code> determines whether the progression has more elements. Return <code>true</code> for infinite progressions</li>
+ * <li><code>#next()</code> determines the next element in the progression, i.e. the next wait time period</li>
+ * </ul>
+ *
+ * Note that backoff policies are exposed as <code>Iterables</code> in order to be consumed multiple times.
+ */
+public abstract class BackoffPolicy implements Iterable<TimeValue> {
+ private static final BackoffPolicy NO_BACKOFF = new NoBackoff();
+
+ /**
+ * Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt.
+ *
+ * @return A backoff policy without any backoff period. The returned instance is thread safe.
+ */
+ public static BackoffPolicy noBackoff() {
+ return NO_BACKOFF;
+ }
+
+ /**
+ * Creates a new constant backoff policy with the provided configuration.
+ *
+ * @param delay The delay defines how long to wait between retry attempts. Must not be null.
+ * Must be &lt;= <code>Integer.MAX_VALUE</code> ms.
+ * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
+ * @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each
+ * iterator created from it should only be used by a single thread.
+ */
+ public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) {
+ return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries);
+ }
+
+ /**
+ * Creates a new exponential backoff policy with a default configuration of a 50 ms initial wait period and 8 retries, taking
+ * roughly 5.1 seconds in total.
+ *
+ * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
+ * iterator created from it should only be used by a single thread.
+ */
+ public static BackoffPolicy exponentialBackoff() {
+ return exponentialBackoff(TimeValue.timeValueMillis(50), 8);
+ }
+
+ /**
+ * Creates a new exponential backoff policy with the provided configuration.
+ *
+ * @param initialDelay The initial delay defines how long to wait for the first retry attempt. Must not be null.
+ * Must be &lt;= <code>Integer.MAX_VALUE</code> ms.
+ * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
+ * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
+ * iterator created from it should only be used by a single thread.
+ */
+ public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) {
+ return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries);
+ }
+
+ private static TimeValue checkDelay(TimeValue delay) {
+ if (delay.millis() > Integer.MAX_VALUE) {
+ throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms");
+ }
+ return delay;
+ }
+
+ private static class NoBackoff extends BackoffPolicy {
+ @Override
+ public Iterator<TimeValue> iterator() {
+ return new Iterator<TimeValue>() {
+ @Override
+ public boolean hasNext() {
+ return false;
+ }
+
+ @Override
+ public TimeValue next() {
+ throw new NoSuchElementException("No backoff");
+ }
+ };
+ }
+ }
+
+ private static class ExponentialBackoff extends BackoffPolicy {
+ private final int start;
+
+ private final int numberOfElements;
+
+ private ExponentialBackoff(int start, int numberOfElements) {
+ assert start >= 0;
+ assert numberOfElements >= 0;
+ this.start = start;
+ this.numberOfElements = numberOfElements;
+ }
+
+ @Override
+ public Iterator<TimeValue> iterator() {
+ return new ExponentialBackoffIterator(start, numberOfElements);
+ }
+ }
+
+ private static class ExponentialBackoffIterator implements Iterator<TimeValue> {
+ private final int numberOfElements;
+
+ private final int start;
+
+ private int currentlyConsumed;
+
+ private ExponentialBackoffIterator(int start, int numberOfElements) {
+ this.start = start;
+ this.numberOfElements = numberOfElements;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return currentlyConsumed < numberOfElements;
+ }
+
+ @Override
+ public TimeValue next() {
+ if (!hasNext()) {
+ throw new NoSuchElementException("Only up to " + numberOfElements + " elements");
+ }
+ int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1);
+ currentlyConsumed++;
+ return TimeValue.timeValueMillis(result);
+ }
+ }
+
+ private static final class ConstantBackoff extends BackoffPolicy {
+ private final TimeValue delay;
+
+ private final int numberOfElements;
+
+ public ConstantBackoff(TimeValue delay, int numberOfElements) {
+ assert numberOfElements >= 0;
+ this.delay = delay;
+ this.numberOfElements = numberOfElements;
+ }
+
+ @Override
+ public Iterator<TimeValue> iterator() {
+ return new ConstantBackoffIterator(delay, numberOfElements);
+ }
+ }
+
+ private static final class ConstantBackoffIterator implements Iterator<TimeValue> {
+ private final TimeValue delay;
+ private final int numberOfElements;
+ private int curr;
+
+ public ConstantBackoffIterator(TimeValue delay, int numberOfElements) {
+ this.delay = delay;
+ this.numberOfElements = numberOfElements;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return curr < numberOfElements;
+ }
+
+ @Override
+ public TimeValue next() {
+ if (!hasNext()) {
+ throw new NoSuchElementException();
+ }
+ curr++;
+ return delay;
+ }
+ }
+}
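To illustrate the progression semantics described above, a sketch that iterates a policy and prints each wait period; with the formula in ExponentialBackoffIterator, the default policy yields roughly 50, 60, 80, 150, 280, 580, 1250 and 2740 ms, about 5.1 s in total:

    import org.elasticsearch.common.unit.TimeValue;

    BackoffPolicy policy = BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 8);
    // Each iterator walks the progression once; create a fresh one per retry sequence.
    for (TimeValue delay : policy) {
        System.out.println("next wait period: " + delay.millis() + " ms");
    }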
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
index 2a7c185ad8..43014cfb75 100644
--- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java
@@ -19,7 +19,6 @@
package org.elasticsearch.action.bulk;
-import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
@@ -48,7 +47,7 @@ public class BulkProcessor implements Closeable {
/**
* A listener for the execution.
*/
- public static interface Listener {
+ public interface Listener {
/**
* Callback before the bulk is executed.
@@ -62,6 +61,9 @@ public class BulkProcessor implements Closeable {
/**
* Callback after a failed execution of bulk request.
+ *
+ * Note that if an instance of <code>InterruptedException</code> is passed, request processing has been cancelled
+ * externally; the thread's interruption status has been restored prior to calling this method.
*/
void afterBulk(long executionId, BulkRequest request, Throwable failure);
}
@@ -79,6 +81,7 @@ public class BulkProcessor implements Closeable {
private int bulkActions = 1000;
private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
private TimeValue flushInterval = null;
+ private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();
/**
* Creates a builder of bulk processor with the client to use and the listener that will be used
@@ -137,10 +140,26 @@ public class BulkProcessor implements Closeable {
}
/**
+ * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally
+ * in case they have failed due to resource constraints (i.e. a thread pool was full).
+ *
+ * The default is to back off exponentially.
+ *
+ * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff()
+ */
+ public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) {
+ if (backoffPolicy == null) {
+ throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()");
+ }
+ this.backoffPolicy = backoffPolicy;
+ return this;
+ }
+
+ /**
* Builds a new bulk processor.
*/
public BulkProcessor build() {
- return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
+ return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
}
}
@@ -148,42 +167,31 @@ public class BulkProcessor implements Closeable {
if (client == null) {
throw new NullPointerException("The client you specified while building a BulkProcessor is null");
}
-
+
return new Builder(client, listener);
}
- private final Client client;
- private final Listener listener;
-
- private final String name;
-
- private final int concurrentRequests;
private final int bulkActions;
private final long bulkSize;
- private final TimeValue flushInterval;
- private final Semaphore semaphore;
+
private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture scheduledFuture;
private final AtomicLong executionIdGen = new AtomicLong();
private BulkRequest bulkRequest;
+ private final BulkRequestHandler bulkRequestHandler;
private volatile boolean closed = false;
- BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
- this.client = client;
- this.listener = listener;
- this.name = name;
- this.concurrentRequests = concurrentRequests;
+ BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
this.bulkActions = bulkActions;
this.bulkSize = bulkSize.bytes();
- this.semaphore = new Semaphore(concurrentRequests);
this.bulkRequest = new BulkRequest();
+ this.bulkRequestHandler = (concurrentRequests == 0) ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);
- this.flushInterval = flushInterval;
if (flushInterval != null) {
this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
@@ -231,14 +239,7 @@ public class BulkProcessor implements Closeable {
if (bulkRequest.numberOfActions() > 0) {
execute();
}
- if (this.concurrentRequests < 1) {
- return true;
- }
- if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
- semaphore.release(this.concurrentRequests);
- return true;
- }
- return false;
+ return this.bulkRequestHandler.awaitClose(timeout, unit);
}
/**
@@ -308,58 +309,7 @@ public class BulkProcessor implements Closeable {
final long executionId = executionIdGen.incrementAndGet();
this.bulkRequest = new BulkRequest();
-
- if (concurrentRequests == 0) {
- // execute in a blocking fashion...
- boolean afterCalled = false;
- try {
- listener.beforeBulk(executionId, bulkRequest);
- BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet();
- afterCalled = true;
- listener.afterBulk(executionId, bulkRequest, bulkItemResponses);
- } catch (Exception e) {
- if (!afterCalled) {
- listener.afterBulk(executionId, bulkRequest, e);
- }
- }
- } else {
- boolean success = false;
- boolean acquired = false;
- try {
- listener.beforeBulk(executionId, bulkRequest);
- semaphore.acquire();
- acquired = true;
- client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
- @Override
- public void onResponse(BulkResponse response) {
- try {
- listener.afterBulk(executionId, bulkRequest, response);
- } finally {
- semaphore.release();
- }
- }
-
- @Override
- public void onFailure(Throwable e) {
- try {
- listener.afterBulk(executionId, bulkRequest, e);
- } finally {
- semaphore.release();
- }
- }
- });
- success = true;
- } catch (InterruptedException e) {
- Thread.interrupted();
- listener.afterBulk(executionId, bulkRequest, e);
- } catch (Throwable t) {
- listener.afterBulk(executionId, bulkRequest, t);
- } finally {
- if (!success && acquired) { // if we fail on client.bulk() release the semaphore
- semaphore.release();
- }
- }
- }
+ this.bulkRequestHandler.execute(bulkRequest, executionId);
}
private boolean isOverTheLimit() {
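A sketch of wiring the new backoff support through the builder; the client and listener are assumed to exist, and setBulkActions/setConcurrentRequests follow the builder's existing setters:

    BulkProcessor processor = BulkProcessor.builder(client, listener)
            .setBulkActions(500)          // flush every 500 actions
            .setConcurrentRequests(1)     // async handler with one in-flight bulk
            .setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueSeconds(1), 3))
            .build();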
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
new file mode 100644
index 0000000000..dc98a16c57
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Abstracts the low-level details of bulk request handling
+ */
+abstract class BulkRequestHandler {
+ protected final ESLogger logger;
+ protected final Client client;
+
+ protected BulkRequestHandler(Client client) {
+ this.client = client;
+ this.logger = Loggers.getLogger(getClass(), client.settings());
+ }
+
+
+ public abstract void execute(BulkRequest bulkRequest, long executionId);
+
+ public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
+
+
+ public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
+ return new SyncBulkRequestHandler(client, backoffPolicy, listener);
+ }
+
+ public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
+ return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests);
+ }
+
+ private static class SyncBulkRequestHandler extends BulkRequestHandler {
+ private final BulkProcessor.Listener listener;
+ private final BackoffPolicy backoffPolicy;
+
+ public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
+ super(client);
+ this.backoffPolicy = backoffPolicy;
+ this.listener = listener;
+ }
+
+ @Override
+ public void execute(BulkRequest bulkRequest, long executionId) {
+ boolean afterCalled = false;
+ try {
+ listener.beforeBulk(executionId, bulkRequest);
+ BulkResponse bulkResponse = Retry
+ .on(EsRejectedExecutionException.class)
+ .policy(backoffPolicy)
+ .withSyncBackoff(client, bulkRequest);
+ afterCalled = true;
+ listener.afterBulk(executionId, bulkRequest, bulkResponse);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ logger.info("Bulk request {} has been cancelled.", e, executionId);
+ if (!afterCalled) {
+ listener.afterBulk(executionId, bulkRequest, e);
+ }
+ } catch (Throwable t) {
+ logger.warn("Failed to execute bulk request {}.", t, executionId);
+ if (!afterCalled) {
+ listener.afterBulk(executionId, bulkRequest, t);
+ }
+ }
+ }
+
+ @Override
+ public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
+ // we are "closed" immediately as there is no request in flight
+ return true;
+ }
+ }
+
+ private static class AsyncBulkRequestHandler extends BulkRequestHandler {
+ private final BackoffPolicy backoffPolicy;
+ private final BulkProcessor.Listener listener;
+ private final Semaphore semaphore;
+ private final int concurrentRequests;
+
+ private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
+ super(client);
+ this.backoffPolicy = backoffPolicy;
+ assert concurrentRequests > 0;
+ this.listener = listener;
+ this.concurrentRequests = concurrentRequests;
+ this.semaphore = new Semaphore(concurrentRequests);
+ }
+
+ @Override
+ public void execute(BulkRequest bulkRequest, long executionId) {
+ boolean bulkRequestSetupSuccessful = false;
+ boolean acquired = false;
+ try {
+ listener.beforeBulk(executionId, bulkRequest);
+ semaphore.acquire();
+ acquired = true;
+ Retry.on(EsRejectedExecutionException.class)
+ .policy(backoffPolicy)
+ .withAsyncBackoff(client, bulkRequest, new ActionListener<BulkResponse>() {
+ @Override
+ public void onResponse(BulkResponse response) {
+ try {
+ listener.afterBulk(executionId, bulkRequest, response);
+ } finally {
+ semaphore.release();
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ listener.afterBulk(executionId, bulkRequest, e);
+ } finally {
+ semaphore.release();
+ }
+ }
+ });
+ bulkRequestSetupSuccessful = true;
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ logger.info("Bulk request {} has been cancelled.", e, executionId);
+ listener.afterBulk(executionId, bulkRequest, e);
+ } catch (Throwable t) {
+ logger.warn("Failed to execute bulk request {}.", t, executionId);
+ listener.afterBulk(executionId, bulkRequest, t);
+ } finally {
+ if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
+ semaphore.release();
+ }
+ }
+ }
+
+ @Override
+ public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
+ if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
+ semaphore.release(this.concurrentRequests);
+ return true;
+ }
+ return false;
+ }
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
new file mode 100644
index 0000000000..477e61045b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.action.bulk;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.function.Predicate;
+
+/**
+ * Encapsulates synchronous and asynchronous retry logic.
+ */
+class Retry {
+ private final Class<? extends Throwable> retryOnThrowable;
+
+ private BackoffPolicy backoffPolicy;
+
+ public static Retry on(Class<? extends Throwable> retryOnThrowable) {
+ return new Retry(retryOnThrowable);
+ }
+
+ /**
+ * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries.
+ */
+ public Retry policy(BackoffPolicy backoffPolicy) {
+ this.backoffPolicy = backoffPolicy;
+ return this;
+ }
+
+ Retry(Class<? extends Throwable> retryOnThrowable) {
+ this.retryOnThrowable = retryOnThrowable;
+ }
+
+ /**
+ * Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the
+ * provided listener.
+ *
+ * @param client Client invoking the bulk request.
+ * @param bulkRequest The bulk request that should be executed.
+ * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
+ * invoked for intermediate retry attempts, only with the final result.
+ */
+ public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
+ AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener);
+ r.execute(bulkRequest);
+
+ }
+
+ /**
+ * Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception.
+ *
+ * @param client Client invoking the bulk request.
+ * @param bulkRequest The bulk request that should be executed.
+ * @return the bulk response as returned by the client.
+ * @throws Exception Any exception thrown while waiting for or executing the bulk request.
+ */
+ public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception {
+ return SyncRetryHandler
+ .create(retryOnThrowable, backoffPolicy, client)
+ .executeBlocking(bulkRequest)
+ .actionGet();
+ }
+
+ static class AbstractRetryHandler implements ActionListener<BulkResponse> {
+ private final ESLogger logger;
+ private final Client client;
+ private final ActionListener<BulkResponse> listener;
+ private final Iterator<TimeValue> backoff;
+ private final Class<? extends Throwable> retryOnThrowable;
+ // Access only when holding a client-side lock, see also #addResponses()
+ private final List<BulkItemResponse> responses = new ArrayList<>();
+ private final long startTimestampNanos;
+ // needed to construct the next bulk request based on the response to the previous one
+ // volatile as we're called from a scheduled thread
+ private volatile BulkRequest currentBulkRequest;
+ private volatile ScheduledFuture<?> scheduledRequestFuture;
+
+ public AbstractRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
+ this.retryOnThrowable = retryOnThrowable;
+ this.backoff = backoffPolicy.iterator();
+ this.client = client;
+ this.listener = listener;
+ this.logger = Loggers.getLogger(getClass(), client.settings());
+ // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
+ this.startTimestampNanos = System.nanoTime();
+ }
+
+ @Override
+ public void onResponse(BulkResponse bulkItemResponses) {
+ if (!bulkItemResponses.hasFailures()) {
+ // we're done here, include all responses
+ addResponses(bulkItemResponses, (r -> true));
+ finishHim();
+ } else {
+ if (canRetry(bulkItemResponses)) {
+ addResponses(bulkItemResponses, (r -> !r.isFailed()));
+ retry(createBulkRequestForRetry(bulkItemResponses));
+ } else {
+ addResponses(bulkItemResponses, (r -> true));
+ finishHim();
+ }
+ }
+ }
+
+ @Override
+ public void onFailure(Throwable e) {
+ try {
+ listener.onFailure(e);
+ } finally {
+ FutureUtils.cancel(scheduledRequestFuture);
+ }
+ }
+
+ private void retry(BulkRequest bulkRequestForRetry) {
+ assert backoff.hasNext();
+ TimeValue next = backoff.next();
+ logger.trace("Retry of bulk request scheduled in {} ms.", next.millis());
+ scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry)));
+ }
+
+ private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
+ BulkRequest requestToReissue = new BulkRequest();
+ int index = 0;
+ for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) {
+ if (bulkItemResponse.isFailed()) {
+ requestToReissue.add(currentBulkRequest.requests().get(index));
+ }
+ index++;
+ }
+ return requestToReissue;
+ }
+
+ private boolean canRetry(BulkResponse bulkItemResponses) {
+ if (!backoff.hasNext()) {
+ return false;
+ }
+ for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
+ if (bulkItemResponse.isFailed()) {
+ Throwable cause = bulkItemResponse.getFailure().getCause();
+ Throwable rootCause = ExceptionsHelper.unwrapCause(cause);
+ if (!rootCause.getClass().equals(retryOnThrowable)) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ private void finishHim() {
+ try {
+ listener.onResponse(getAccumulatedResponse());
+ } finally {
+ FutureUtils.cancel(scheduledRequestFuture);
+ }
+ }
+
+ private void addResponses(BulkResponse response, Predicate<BulkItemResponse> filter) {
+ for (BulkItemResponse bulkItemResponse : response) {
+ if (filter.test(bulkItemResponse)) {
+ // Use client-side lock here to avoid visibility issues. This method may be called multiple times
+ // (based on how many retries we have to issue) and relying on the response handling code being
+ // scheduled on the same thread is fragile.
+ synchronized (responses) {
+ responses.add(bulkItemResponse);
+ }
+ }
+ }
+ }
+
+ private BulkResponse getAccumulatedResponse() {
+ BulkItemResponse[] itemResponses;
+ synchronized (responses) {
+ // size the target array correctly: toArray(new BulkItemResponse[1]) would leave a stray null element when empty
+ itemResponses = responses.toArray(new BulkItemResponse[responses.size()]);
+ }
+ long stopTimestamp = System.nanoTime();
+ long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis();
+ return new BulkResponse(itemResponses, totalLatencyMs);
+ }
+
+ public void execute(BulkRequest bulkRequest) {
+ this.currentBulkRequest = bulkRequest;
+ client.bulk(bulkRequest, this);
+ }
+ }
+
+ static class AsyncRetryHandler extends AbstractRetryHandler {
+ public AsyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
+ super(retryOnThrowable, backoffPolicy, client, listener);
+ }
+ }
+
+ static class SyncRetryHandler extends AbstractRetryHandler {
+ private final PlainActionFuture<BulkResponse> actionFuture;
+
+ public static SyncRetryHandler create(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client) {
+ PlainActionFuture<BulkResponse> actionFuture = PlainActionFuture.newFuture();
+ return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture);
+ }
+
+ public SyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture<BulkResponse> actionFuture) {
+ super(retryOnThrowable, backoffPolicy, client, actionFuture);
+ this.actionFuture = actionFuture;
+ }
+
+ public ActionFuture<BulkResponse> executeBlocking(BulkRequest bulkRequest) {
+ super.execute(bulkRequest);
+ return actionFuture;
+ }
+ }
+}
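Retry is package-private, so it is only reachable from within org.elasticsearch.action.bulk; the handlers above use it exactly as in this sketch:

    // Synchronous variant, as in SyncBulkRequestHandler#execute:
    BulkResponse response = Retry
            .on(EsRejectedExecutionException.class)   // retry only on rejected executions
            .policy(BackoffPolicy.exponentialBackoff())
            .withSyncBackoff(client, bulkRequest);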
diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
index 68bcdc1503..1d29e6c397 100644
--- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java
@@ -20,7 +20,6 @@
package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;
@@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
-import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.threadpool.ThreadPool;
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
index 442b0915e3..52d45ec940 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java
@@ -473,6 +473,14 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
return this;
}
+ /**
+ * Should the query be profiled? Defaults to <code>false</code>.
+ */
+ public SearchRequestBuilder setProfile(boolean profile) {
+ sourceBuilder().profile(profile);
+ return this;
+ }
+
@Override
public String toString() {
if (request.source() != null) {
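A sketch of enabling the new profiling flag on a search; the index and query are hypothetical:

    SearchResponse response = client.prepareSearch("logs-2015.12")
            .setQuery(QueryBuilders.matchQuery("message", "error"))
            .setProfile(true)   // ask each shard to record query profiling data
            .get();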
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
index 769e0978a7..e6681bf2b9 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -20,6 +20,7 @@
package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
@@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
+import java.util.List;
+import java.util.Map;
import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;
@@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
this.scrollId = scrollId;
}
+ /**
+ * If profiling was enabled, this returns an object containing the profile results from
+ * each shard. If profiling was not enabled, this will return <code>null</code>.
+ *
+ * @return The profile results, or <code>null</code> if profiling was not enabled
+ */
+ public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
+ return internalResponse.profile();
+ }
+
static final class Fields {
static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
static final XContentBuilderString TOOK = new XContentBuilderString("took");
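Continuing the profiling sketch above, reading the per-shard results back; the map is keyed by a shard identifier and is null when profiling was not enabled:

    Map<String, List<ProfileShardResult>> profiles = response.getProfileResults();
    if (profiles != null) {
        // one entry per shard that executed the profiled query
        profiles.forEach((shard, results) ->
                System.out.println(shard + " -> " + results.size() + " profile result(s)"));
    }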
diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
index b73ee8a75f..5f2fb33e04 100644
--- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
+++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
@@ -21,25 +21,30 @@ package org.elasticsearch.action.support;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
* Helper for dealing with destructive operations and wildcard usage.
*/
-public final class DestructiveOperations extends AbstractComponent implements NodeSettingsService.Listener {
+public final class DestructiveOperations extends AbstractComponent {
/**
* Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
*/
- public static final String REQUIRES_NAME = "action.destructive_requires_name";
+ public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER);
private volatile boolean destructiveRequiresName;
@Inject
- public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) {
+ public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false);
- nodeSettingsService.addListener(this);
+ destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName);
+ }
+
+ private void setDestructiveRequiresName(boolean destructiveRequiresName) {
+ this.destructiveRequiresName = destructiveRequiresName;
}
/**
@@ -65,15 +70,6 @@ public final class DestructiveOperations extends AbstractComponent implements No
}
}
- @Override
- public void onRefreshSettings(Settings settings) {
- boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName);
- if (destructiveRequiresName != newValue) {
- logger.info("updating [action.operate_all_indices] from [{}] to [{}]", destructiveRequiresName, newValue);
- this.destructiveRequiresName = newValue;
- }
- }
-
private static boolean hasWildcardUsage(String aliasOrIndex) {
return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1;
}
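The register-and-consume pattern above generalizes to any dynamic cluster setting; a sketch with a hypothetical flag (the setting name is invented for illustration):

    // Hypothetical dynamic boolean setting, mirroring REQUIRES_NAME_SETTING above.
    public static final Setting<Boolean> EXAMPLE_FLAG =
            Setting.boolSetting("action.example_flag", false, true, Setting.Scope.CLUSTER);

    // At construction time: seed from Settings, then subscribe to updates.
    this.exampleFlag = EXAMPLE_FLAG.get(settings);
    clusterSettings.addSettingsUpdateConsumer(EXAMPLE_FLAG, v -> this.exampleFlag = v);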
diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index 8bcba8ad54..e8f4a0d83c 100644
--- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -19,8 +19,16 @@
package org.elasticsearch.action.support.broadcast.node;
-import org.elasticsearch.action.*;
-import org.elasticsearch.action.support.*;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.NodeShouldNotConnectException;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
@@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
e.setIndex(shardRouting.getIndex());
e.setShard(shardRouting.shardId());
shardResults[shardIndex] = e;
- logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
+ if (TransportActions.isShardNotAvailableException(t)) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+ }
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+ }
+ }
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 26c439c0a3..d17cc02c5b 100644
--- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -300,11 +300,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
public void onFailure(Throwable t) {
if (t instanceof RetryOnReplicaException) {
- logger.trace("Retrying operation on replica, action [{}], request [{}]", t, actionName, request);
+ logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
- threadPool.executor(executor).execute(AsyncReplicaAction.this);
+ // Forking a thread on the local node via the transport service so that custom transport services have an
+ // opportunity to execute custom logic before the replica operation begins
+ String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
+ TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
+ transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
index 9f8b2a2e7b..d28ba2986e 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -50,6 +50,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.lookup.SourceLookup;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -245,7 +246,7 @@ public class UpdateHelper extends AbstractComponent {
private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) {
try {
if (scriptService != null) {
- ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request);
+ ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap());
script.setNextVar("ctx", ctx);
script.run();
// we need to unwrap the ctx...
diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
index 15def3b273..73eed43352 100644
--- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
+++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
@@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
@@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
@@ -391,6 +394,29 @@ public interface IndicesAdminClient extends ElasticsearchClient {
FlushRequestBuilder prepareFlush(String... indices);
/**
+ * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+ *
+ * @param request The sync flush request
+ * @return A result future
+ * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+ */
+ ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);
+
+ /**
+ * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+ *
+ * @param request The sync flush request
+ * @param listener A listener to be notified with a result
+ * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+ */
+ void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);
+
+ /**
+ * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+ */
+ SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);
+
+ /**
* Explicitly force merge one or more indices down to a specified number of segments.
*
* @param request The force merge request
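The three new methods form the usual trio of client entry points (future, listener, builder). A usage sketch of the builder form; the sample class and index name are made up:

    import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
    import org.elasticsearch.client.Client;

    public class SyncedFlushSample { // made-up sample class
        static SyncedFlushResponse flush(Client client) {
            // writes a sync id to each shard copy so later recoveries can take the fast path
            return client.admin().indices().prepareSyncedFlush("logs-2015.12").get(); // index name made up
        }
    }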
diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java
index 7f0decaba5..063fd10dcf 100644
--- a/core/src/main/java/org/elasticsearch/client/Requests.java
+++ b/core/src/main/java/org/elasticsearch/client/Requests.java
@@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
@@ -131,7 +132,7 @@ public class Requests {
public static SuggestRequest suggestRequest(String... indices) {
return new SuggestRequest(indices);
}
-
+
/**
* Creates a search request against one or more indices. Note, the search source must be set either using the
* actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.
@@ -266,6 +267,17 @@ public class Requests {
}
/**
+ * Creates a synced flush indices request.
+ *
+ * @param indices The indices to sync flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+ * @return The synced flush request
+ * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
+ */
+ public static SyncedFlushRequest syncedFlushRequest(String... indices) {
+ return new SyncedFlushRequest(indices);
+ }
+
+ /**
* Creates a force merge request.
*
* @param indices The indices to force merge. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
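The factory method pairs naturally with the listener variant on IndicesAdminClient. A sketch; apart from the made-up class and index name, every call comes from this diff:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
    import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.client.Requests;

    public class AsyncSyncedFlushSample { // made-up sample class
        static void flush(Client client) {
            SyncedFlushRequest request = Requests.syncedFlushRequest("logs-2015.12"); // index name made up
            client.admin().indices().syncedFlush(request, new ActionListener<SyncedFlushResponse>() {
                @Override
                public void onResponse(SyncedFlushResponse response) {
                    // sync ids written; shard recoveries can now be skipped where copies match
                }

                @Override
                public void onFailure(Throwable t) {
                    // handle or log the failure
                }
            });
        }
    }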
diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
index 1b5e8539ac..ea57901f2b 100644
--- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
+++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
@@ -1316,6 +1320,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
}
@Override
+ public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
+ return execute(SyncedFlushAction.INSTANCE, request);
+ }
+
+ @Override
+ public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
+ execute(SyncedFlushAction.INSTANCE, request, listener);
+ }
+
+ @Override
+ public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
+ return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
+ }
+
+ @Override
public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
execute(GetMappingsAction.INSTANCE, request, listener);
}
diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
index 33cf347941..3b8be668f4 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
+++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers;
import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.ClusterNameModule;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
@@ -43,19 +42,15 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
-import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;
@@ -69,7 +64,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
* The transport client allows creating a client that is not part of the cluster, but simply connects to one
* or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
* <p>
- * The transport client important modules used is the {@link org.elasticsearch.transport.TransportModule} which is
+ * The most important module used by the transport client is the {@link org.elasticsearch.common.network.NetworkModule}, which is
* started in client mode (only connects, no bind).
*/
public class TransportClient extends AbstractClient {
@@ -143,10 +138,9 @@ public class TransportClient extends AbstractClient {
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(this.settings, settingsFilter ));
- modules.add(new NetworkModule(networkService));
+ modules.add(new NetworkModule(networkService, this.settings, true));
modules.add(new ClusterNameModule(this.settings));
modules.add(new ThreadPoolModule(threadPool));
- modules.add(new TransportModule(this.settings));
modules.add(new SearchModule() {
@Override
protected void configure() {
@@ -154,7 +148,6 @@ public class TransportClient extends AbstractClient {
}
});
modules.add(new ActionModule(true));
- modules.add(new ClientTransportModule());
modules.add(new CircuitBreakerModule(this.settings));
pluginsService.processModules(modules);
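For orientation, a client bootstrap as it roughly looks in this era of the codebase; the builder call and the address below are assumptions for illustration, not part of this change:

    import java.net.InetAddress;

    import org.elasticsearch.client.transport.TransportClient;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.transport.InetSocketTransportAddress;

    public class ClientBootstrapSample { // made-up sample class
        public static void main(String[] args) throws Exception {
            TransportClient client = TransportClient.builder() // builder API assumed for this era
                    .settings(Settings.settingsBuilder().put("cluster.name", "my-cluster").build())
                    .build();
            // connects only; the client never binds a port
            client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
        }
    }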
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
index 2b7786fdb1..43c616d799 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java
@@ -19,9 +19,6 @@
package org.elasticsearch.cluster;
-import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
-import org.elasticsearch.action.support.DestructiveOperations;
-import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
@@ -29,7 +26,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
-import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
@@ -60,17 +56,15 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.InternalClusterService;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.cluster.settings.DynamicSettings;
import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
-import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.engine.EngineConfig;
@@ -81,21 +75,13 @@ import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
-import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.ttl.IndicesTTLService;
-import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.DefaultSearchContext;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportService;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
+import java.util.*;
/**
* Configures classes and services that affect the entire cluster.
@@ -122,7 +108,6 @@ public class ClusterModule extends AbstractModule {
SnapshotInProgressAllocationDecider.class));
private final Settings settings;
- private final DynamicSettings.Builder clusterDynamicSettings = new DynamicSettings.Builder();
private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder();
private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class);
private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
@@ -134,7 +119,6 @@ public class ClusterModule extends AbstractModule {
public ClusterModule(Settings settings) {
this.settings = settings;
- registerBuiltinClusterSettings();
registerBuiltinIndexSettings();
for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
@@ -144,70 +128,11 @@ public class ClusterModule extends AbstractModule {
registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
}
- private void registerBuiltinClusterSettings() {
- registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, Validator.EMPTY);
- registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*", Validator.EMPTY);
- registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT);
- registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT);
- registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT);
- registerClusterDynamicSetting(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ALLOCATION_ALLOW_REBALANCE_VALIDATOR);
- registerClusterDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER);
- registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
- registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, Validator.EMPTY);
- registerClusterDynamicSetting(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, Validator.BOOLEAN);
- registerClusterDynamicSetting(DiscoverySettings.NO_MASTER_BLOCK, Validator.EMPTY);
- registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY);
- registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY);
- registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
- registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, Validator.EMPTY);
- registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
- registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
- registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME);
- registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR);
- registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
- registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);
- registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, Validator.EMPTY);
- registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, Validator.EMPTY);
- registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, Validator.BOOLEAN);
- registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, Validator.BOOLEAN);
- registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, Validator.EMPTY);
- registerClusterDynamicSetting(DestructiveOperations.REQUIRES_NAME, Validator.EMPTY);
- registerClusterDynamicSetting(DiscoverySettings.PUBLISH_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(DiscoverySettings.PUBLISH_DIFF_ENABLE, Validator.BOOLEAN);
- registerClusterDynamicSetting(DiscoverySettings.COMMIT_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
- registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
- registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
- registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
- registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
- registerClusterDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE);
- registerClusterDynamicSetting(SearchService.DEFAULT_SEARCH_TIMEOUT, Validator.TIMEOUT);
- registerClusterDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR);
- registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE, Validator.EMPTY);
- registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE + ".*", Validator.EMPTY);
- registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE, Validator.EMPTY);
- registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY);
- registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN);
- registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
- registerClusterDynamicSetting(TransportReplicationAction.SHARD_FAILURE_TIMEOUT, Validator.TIME_NON_NEGATIVE);
- }
private void registerBuiltinIndexSettings() {
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY);
- registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.EMPTY);
+ registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER);
registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY);
registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY);
registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
@@ -272,9 +197,6 @@ public class ClusterModule extends AbstractModule {
indexDynamicSettings.addSetting(setting, validator);
}
- public void registerClusterDynamicSetting(String setting, Validator validator) {
- clusterDynamicSettings.addSetting(setting, validator);
- }
public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {
allocationDeciders.registerExtension(allocationDecider);
@@ -290,7 +212,6 @@ public class ClusterModule extends AbstractModule {
@Override
protected void configure() {
- bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings.build());
bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build());
// bind ShardsAllocator
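The removed registerClusterDynamicSetting(...) calls do not move elsewhere in this file; each is superseded by a Setting constant next to the code it configures, where type, default, dynamism, and scope replace the old Validator. The correspondence, sketched with a definition taken verbatim from this diff (the holder class is made up):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.unit.TimeValue;

    public final class SettingMigrationSketch { // made-up holder
        // old: registerClusterDynamicSetting("cluster.info.update.timeout", Validator.TIME_NON_NEGATIVE);
        // new: the Setting itself enforces a non-negative time, carries the default, and is dynamic
        public static final Setting<TimeValue> EXAMPLE_TIMEOUT_SETTING =
                Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);
    }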
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
index e20f21b4ce..34ccfd3b43 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -129,7 +129,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
@SuppressWarnings("unchecked")
T proto = (T)customPrototypes.get(type);
if (proto == null) {
- throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "]");
+ throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
}
return proto;
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
index ab85d9540f..fb22c2ca36 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
@@ -38,6 +38,13 @@ public interface ClusterStateTaskExecutor<T> {
}
/**
+ * Callback invoked after a new cluster state is published. Note that
+ * this method is not invoked if the cluster state was not updated.
+ */
+ default void clusterStatePublished(ClusterState newClusterState) {
+ }
+
+ /**
* Represents the result of a batched execution of cluster state update tasks
* @param <T> the type of the cluster state update task
*/
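A sketch of an executor that uses the new hook; the task type is hypothetical and the BatchResult builder calls are inferred from their usage elsewhere in this commit:

    import java.util.List;

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.ClusterStateTaskExecutor;

    class MyTask {} // hypothetical task type

    class PublishAwareExecutor implements ClusterStateTaskExecutor<MyTask> { // made-up executor
        @Override
        public BatchResult<MyTask> execute(ClusterState currentState, List<MyTask> tasks) throws Exception {
            // return the (possibly unchanged) state, marking all tasks successful;
            // builder calls inferred from this commit's ShardFailedClusterStateHandler
            return BatchResult.<MyTask>builder().successes(tasks).build(currentState);
        }

        @Override
        public void clusterStatePublished(ClusterState newClusterState) {
            // reached only when the state actually changed and was published
        }
    }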
diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
index 039868d16c..925a5a12ed 100644
--- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -37,11 +37,12 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.monitor.fs.FsInfo;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
@@ -63,8 +64,8 @@ import java.util.concurrent.TimeUnit;
*/
public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
- public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval";
- public static final String INTERNAL_CLUSTER_INFO_TIMEOUT = "cluster.info.update.timeout";
+ public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);
+ public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);
private volatile TimeValue updateFrequency;
@@ -82,7 +83,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
private final List<Listener> listeners = new CopyOnWriteArrayList<>();
@Inject
- public InternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
+ public InternalClusterInfoService(Settings settings, ClusterSettings clusterSettings,
TransportNodesStatsAction transportNodesStatsAction,
TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService,
ThreadPool threadPool) {
@@ -95,10 +96,12 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
this.transportIndicesStatsAction = transportIndicesStatsAction;
this.clusterService = clusterService;
this.threadPool = threadPool;
- this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30));
- this.fetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, TimeValue.timeValueSeconds(15));
- this.enabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
- nodeSettingsService.addListener(new ApplySettings());
+ this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings);
+ this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings);
+ this.enabled = DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout);
+ clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency);
+ clusterSettings.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);
// Add InternalClusterInfoService to listen for Master changes
this.clusterService.add((LocalNodeMasterListener)this);
@@ -106,35 +109,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
this.clusterService.add((ClusterStateListener)this);
}
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null);
- // ClusterInfoService is only enabled if the DiskThresholdDecider is enabled
- Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
-
- if (newUpdateFrequency != null) {
- if (newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) {
- logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency);
- throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds");
- } else {
- logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency);
- InternalClusterInfoService.this.updateFrequency = newUpdateFrequency;
- }
- }
-
- TimeValue newFetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, null);
- if (newFetchTimeout != null) {
- logger.info("updating fetch timeout [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_TIMEOUT, fetchTimeout, newFetchTimeout);
- InternalClusterInfoService.this.fetchTimeout = newFetchTimeout;
- }
+ private void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+ private void setFetchTimeout(TimeValue fetchTimeout) {
+ this.fetchTimeout = fetchTimeout;
+ }
- // We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable
- if (newEnabled != null) {
- InternalClusterInfoService.this.enabled = newEnabled;
- }
- }
+ void setUpdateFrequency(TimeValue updateFrequency) {
+ this.updateFrequency = updateFrequency;
}
@Override
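Since both settings are now registered as dynamic and validated, the standard update-settings call exercises the new minimum (values are illustrative; an interval under 10s is rejected by the Setting itself):

    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.Settings;

    public class ClusterInfoTuningSample { // made-up sample class
        static void tune(Client client) {
            client.admin().cluster().prepareUpdateSettings()
                    .setTransientSettings(Settings.settingsBuilder()
                            .put("cluster.info.update.interval", "20s") // the Setting enforces a 10s minimum
                            .put("cluster.info.update.timeout", "30s")
                            .build())
                    .get();
        }
    }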
diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
index e3925aa6f4..9e57fe3a48 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
@@ -26,11 +26,12 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.concurrent.TimeoutException;
@@ -40,30 +41,23 @@ import java.util.concurrent.TimeoutException;
*/
public class MappingUpdatedAction extends AbstractComponent {
- public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout";
+ public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
private IndicesAdminClient client;
private volatile TimeValue dynamicMappingUpdateTimeout;
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- TimeValue current = MappingUpdatedAction.this.dynamicMappingUpdateTimeout;
- TimeValue newValue = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, current);
- if (!current.equals(newValue)) {
- logger.info("updating " + INDICES_MAPPING_DYNAMIC_TIMEOUT + " from [{}] to [{}]", current, newValue);
- MappingUpdatedAction.this.dynamicMappingUpdateTimeout = newValue;
- }
- }
- }
-
@Inject
- public MappingUpdatedAction(Settings settings, NodeSettingsService nodeSettingsService) {
+ public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- this.dynamicMappingUpdateTimeout = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, TimeValue.timeValueSeconds(30));
- nodeSettingsService.addListener(new ApplySettings());
+ this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout);
}
+ private void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) {
+ this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout;
+ }
+
public void setClient(Client client) {
this.client = client.admin().indices();
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index d09df094a6..a04a6d7bd5 100644
--- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -20,7 +20,12 @@
package org.elasticsearch.cluster.action.shard;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTaskConfig;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
+import org.elasticsearch.cluster.ClusterStateTaskListener;
+import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;
@@ -37,19 +42,24 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.EmptyTransportResponseHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Locale;
import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;
-/**
- *
- */
-public class ShardStateAction extends AbstractComponent {
+public class ShardStateAction extends AbstractComponent {
public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";
@@ -97,65 +107,68 @@ public class ShardStateAction extends AbstractComponent {
options = TransportRequestOptions.builder().withTimeout(timeout).build();
}
transportService.sendRequest(masterNode,
- SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleResponse(TransportResponse.Empty response) {
+ listener.onSuccess();
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry);
+ listener.onShardFailedFailure(masterNode, exp);
+ }
+ });
+ }
+
+ private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+ @Override
+ public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+ handleShardFailureOnMaster(request, new ClusterStateTaskListener() {
@Override
- public void handleResponse(TransportResponse.Empty response) {
- listener.onSuccess();
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting);
+ try {
+ channel.sendResponse(t);
+ } catch (Throwable channelThrowable) {
+ logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting);
+ }
}
@Override
- public void handleException(TransportException exp) {
- logger.warn("failed to send failed shard to {}", exp, masterNode);
- listener.onShardFailedFailure(masterNode, exp);
+ public void onNoLongerMaster(String source) {
+ logger.error("no longer master while failing shard [{}]", request.shardRouting);
+ try {
+ channel.sendResponse(new NotMasterException(source));
+ } catch (Throwable channelThrowable) {
+ logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting);
+ }
}
- });
- }
-
- public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) {
- DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
- if (masterNode == null) {
- logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting);
- return;
- }
- shardStarted(shardRouting, indexUUID, reason, masterNode);
- }
- public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) {
- ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
- logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
- transportService.sendRequest(masterNode,
- SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
- public void handleException(TransportException exp) {
- logger.warn("failed to send shard started to [{}]", exp, masterNode);
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ try {
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ } catch (Throwable channelThrowable) {
+ logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting);
+ }
}
-
- });
- }
-
- private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
-
- private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
- logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
- clusterService.submitStateUpdateTask(
- "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
- shardRoutingEntry,
- ClusterStateTaskConfig.build(Priority.HIGH),
- shardFailedClusterStateHandler,
- shardFailedClusterStateHandler);
+ }
+ );
+ }
}
- class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
+ class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry> {
@Override
public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
- List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
+ List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>(tasks.size());
for (ShardRoutingEntry task : tasks) {
- shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
+ failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
}
ClusterState maybeUpdatedState = currentState;
try {
- RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
+ RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards);
if (result.changed()) {
maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
}
@@ -167,31 +180,57 @@ public class ShardStateAction extends AbstractComponent {
}
@Override
- public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
- if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
- logger.trace("unassigned shards after shard failures. scheduling a reroute.");
- routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
+ public void clusterStatePublished(ClusterState newClusterState) {
+ int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size();
+ if (numberOfUnassignedShards > 0) {
+ String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
+ if (logger.isTraceEnabled()) {
+ logger.trace(reason + ", scheduling a reroute");
}
+ routingService.reroute(reason);
+ }
}
+ }
- @Override
- public void onFailure(String source, Throwable t) {
- logger.error("unexpected failure during [{}]", t, source);
- }
+ private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
+
+ private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) {
+ logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
+ clusterService.submitStateUpdateTask(
+ "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
+ shardRoutingEntry,
+ ClusterStateTaskConfig.build(Priority.HIGH),
+ shardFailedClusterStateHandler,
+ listener);
}
- private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
- new ShardStartedClusterStateHandler();
+ public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) {
+ DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
+ if (masterNode == null) {
+ logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting);
+ return;
+ }
+ shardStarted(shardRouting, indexUUID, reason, masterNode);
+ }
- private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
- logger.debug("received shard started for {}", shardRoutingEntry);
+ public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) {
+ ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null);
+ logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
+ transportService.sendRequest(masterNode,
+ SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
+ @Override
+ public void handleException(TransportException exp) {
+ logger.warn("failed to send shard started to [{}]", exp, masterNode);
+ }
+ });
+ }
- clusterService.submitStateUpdateTask(
- "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
- shardRoutingEntry,
- ClusterStateTaskConfig.build(Priority.URGENT),
- shardStartedClusterStateHandler,
- shardStartedClusterStateHandler);
+ class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+ @Override
+ public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+ handleShardStartedOnMaster(request);
+ channel.sendResponse(TransportResponse.Empty.INSTANCE);
+ }
}
class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
@@ -223,26 +262,20 @@ public class ShardStateAction extends AbstractComponent {
}
}
- private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+ private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler();
- @Override
- public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
- handleShardFailureOnMaster(request);
- channel.sendResponse(TransportResponse.Empty.INSTANCE);
- }
- }
-
- class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+ private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
+ logger.debug("received shard started for {}", shardRoutingEntry);
- @Override
- public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
- shardStartedOnMaster(request);
- channel.sendResponse(TransportResponse.Empty.INSTANCE);
- }
+ clusterService.submitStateUpdateTask(
+ "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
+ shardRoutingEntry,
+ ClusterStateTaskConfig.build(Priority.URGENT),
+ shardStartedClusterStateHandler,
+ shardStartedClusterStateHandler);
}
public static class ShardRoutingEntry extends TransportRequest {
-
ShardRouting shardRouting;
String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
String message;
@@ -283,8 +316,13 @@ public class ShardStateAction extends AbstractComponent {
}
public interface Listener {
- default void onSuccess() {}
- default void onShardFailedNoMaster() {}
- default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {}
+ default void onSuccess() {
+ }
+
+ default void onShardFailedNoMaster() {
+ }
+
+ default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {
+ }
}
}
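With the Listener defaults reformatted above, callers override only the cases they care about. A minimal sketch (the wrapper class is made up; the call site that accepts the listener is outside this hunk):

    import org.elasticsearch.cluster.action.shard.ShardStateAction;
    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.transport.TransportException;

    public class ShardFailedListenerSample { // made-up wrapper
        static ShardStateAction.Listener listener() {
            return new ShardStateAction.Listener() {
                @Override
                public void onSuccess() {
                    // master acknowledged the shard-failed message
                }

                @Override
                public void onShardFailedFailure(DiscoveryNode master, TransportException e) {
                    // the send to the master failed; the caller decides whether to retry
                }
            };
        }
    }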
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index 669d71477c..93961bf1fb 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -621,7 +621,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public int numberOfReplicas() {
return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
}
-
+
public Builder creationDate(long creationDate) {
settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
return this;
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 751f8a09ea..55cb8a5d94 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -40,8 +40,8 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.support.LoggerMessageFormat;
import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
@@ -134,13 +134,13 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
//noinspection unchecked
T proto = (T) customPrototypes.get(type);
if (proto == null) {
- throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]");
+ throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins");
}
return proto;
}
- public static final String SETTING_READ_ONLY = "cluster.blocks.read_only";
+ public static final Setting<Boolean> SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER);
public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
@@ -745,23 +745,23 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
/** All known byte-sized cluster settings. */
public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
- IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
- RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
+ IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()));
/** All known time cluster settings. */
public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
- IndicesTTLService.INDICES_TTL_INTERVAL,
- RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC,
- RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK,
- RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT,
- RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT,
- RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT,
- DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL,
- InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL,
- InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT,
- DiscoverySettings.PUBLISH_TIMEOUT,
- InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD));
+ IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(),
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(),
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(),
+ InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(),
+ InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(),
+ DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(),
+ InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey()));
/** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
* specify a unit. */
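The string-keyed sets survive because a typed Setting still exposes its raw key; a one-line sketch (wrapper class made up):

    import org.elasticsearch.cluster.metadata.MetaData;

    public class SettingKeySample { // made-up wrapper
        static String readOnlyKey() {
            return MetaData.SETTING_READ_ONLY_SETTING.getKey(); // "cluster.blocks.read_only"
        }
    }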
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index 1fa1b702f6..b38e99d449 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.Locale;
/**
* Service responsible for submitting open/close index requests
@@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent {
}
if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
- IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
- for (IndexShardRoutingTable shard : indexRoutingTable) {
- for (ShardRouting shardRouting : shard) {
- if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) {
- throw new IndexPrimaryShardNotAllocatedException(new Index(index));
- }
- }
- }
indicesToClose.add(index);
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 957125703b..8093d93ccc 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.percolator.PercolatorService;
@@ -237,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent {
}
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
- Map<String, DocumentMapper> newMappers = new HashMap<>();
- Map<String, DocumentMapper> existingMappers = new HashMap<>();
+ String mappingType = request.type();
+ CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
for (String index : request.indices()) {
IndexService indexService = indicesService.indexServiceSafe(index);
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
@@ -246,16 +245,13 @@ public class MetaDataMappingService extends AbstractComponent {
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
// _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
- newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
+ newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false);
} else {
- newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
+ newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
if (existingMapper != null) {
// first, simulate
- MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
- // if we have conflicts, throw an exception
- if (mergeResult.hasConflicts()) {
- throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
- }
+ // this will just throw exceptions in case of problems
+ existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
} else {
// TODO: can we find a better place for this validation?
// The reason this validation is here is that the mapper service doesn't learn about
@@ -274,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
}
- newMappers.put(index, newMapper);
- if (existingMapper != null) {
- existingMappers.put(index, existingMapper);
+ if (mappingType == null) {
+ mappingType = newMapper.type();
+ } else if (mappingType.equals(newMapper.type()) == false) {
+ throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
}
}
+ assert mappingType != null;
- String mappingType = request.type();
- if (mappingType == null) {
- mappingType = newMappers.values().iterator().next().type();
- } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
- throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
- }
if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
final Map<String, MappingMetaData> mappings = new HashMap<>();
- for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
- String index = entry.getKey();
+ for (String index : request.indices()) {
// do the actual merge here on the master, and update the mapping source
- DocumentMapper newMapper = entry.getValue();
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
continue;
}
CompressedXContent existingSource = null;
- if (existingMappers.containsKey(entry.getKey())) {
- existingSource = existingMappers.get(entry.getKey()).mappingSource();
+ DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
+ if (existingMapper != null) {
+ existingSource = existingMapper.mappingSource();
}
- DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
+ DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
@@ -322,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent {
} else {
mappings.put(index, new MappingMetaData(mergedMapper));
if (logger.isDebugEnabled()) {
- logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
+ logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
} else if (logger.isInfoEnabled()) {
- logger.info("[{}] create_mapping [{}]", index, newMapper.type());
+ logger.info("[{}] create_mapping [{}]", index, mappingType);
}
}
}
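The rewritten applyRequest drops the newMappers/existingMappers bookkeeping: it parses the update once, simulates the merge against any existing mapper (merge now throws on conflict instead of returning a MergeResult), and only then performs the real merge per index. A standalone sketch of that simulate-then-apply shape; the types are stand-ins, not the Elasticsearch mapper API:

    import java.util.List;

    interface Mapper {
        // throws IllegalArgumentException on conflict; mutates nothing when simulate is true
        void merge(Mapper update, boolean simulate);
    }

    class MappingUpdater {
        void apply(List<Mapper> existingMappers, Mapper update) {
            // first pass: simulate against every existing mapper so a conflict
            // anywhere aborts the whole request before any state is mutated
            for (Mapper existing : existingMappers) {
                existing.merge(update, true);
            }
            // second pass: the merges are now known to be conflict-free
            for (Mapper existing : existingMappers) {
                existing.merge(update, false);
            }
        }
    }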
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
index badf70a191..8dd980c8bb 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java
@@ -21,12 +21,12 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
-
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;
@@ -34,7 +34,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -85,7 +84,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
// fill in the nodeToShards with the "live" nodes
for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
- nodesToShards.put(cursor.value.id(), new ArrayList<ShardRouting>());
+ nodesToShards.put(cursor.value.id(), new ArrayList<>());
}
// fill in the inverse of node -> shards allocated
@@ -98,21 +97,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// by the ShardId, as this is common for primary and replicas.
// A replica Set might have one (and not more) replicas with the state of RELOCATING.
if (shard.assignedToNode()) {
- List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId());
- if (entries == null) {
- entries = new ArrayList<>();
- nodesToShards.put(shard.currentNodeId(), entries);
- }
+ List<ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>());
final ShardRouting sr = getRouting(shard, readOnly);
entries.add(sr);
assignedShardsAdd(sr);
if (shard.relocating()) {
- entries = nodesToShards.get(shard.relocatingNodeId());
relocatingShards++;
- if (entries == null) {
- entries = new ArrayList<>();
- nodesToShards.put(shard.relocatingNodeId(), entries);
- }
+ entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>());
// add the counterpart shard with relocatingNodeId reflecting the source from which
// it's relocating from.
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();
@@ -128,7 +119,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
inactiveShardCount++;
}
} else {
- final ShardRouting sr = getRouting(shard, readOnly);
+ final ShardRouting sr = getRouting(shard, readOnly);
assignedShardsAdd(sr);
unassignedShards.add(sr);
}
@@ -456,12 +447,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
// no unassigned
return;
}
- List<ShardRouting> shards = assignedShards.get(shard.shardId());
- if (shards == null) {
- shards = new ArrayList<>();
- assignedShards.put(shard.shardId(), shards);
- }
- assert assertInstanceNotInList(shard, shards);
+ List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>());
+ assert assertInstanceNotInList(shard, shards);
shards.add(shard);
}
@@ -671,7 +658,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
public void shuffle() {
- Collections.shuffle(unassigned);
+ Randomness.shuffle(unassigned);
}
/**
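Two mechanical cleanups run through this file: the get/null-check/put dance becomes Map.computeIfAbsent, and Collections.shuffle is routed through the seedable Randomness helper. A runnable sketch of the computeIfAbsent idiom:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class GroupByNode {
        public static void main(String[] args) {
            Map<String, List<String>> shardsByNode = new HashMap<>();
            // before: get(), null check, put(); three calls and a branch
            // after: one idiom that allocates the list only on first use
            shardsByNode.computeIfAbsent("node-1", k -> new ArrayList<>()).add("shard-0");
            shardsByNode.computeIfAbsent("node-1", k -> new ArrayList<>()).add("shard-1");
            System.out.println(shardsByNode); // {node-1=[shard-0, shard-1]}
        }
    }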
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
index 8dd71e3fba..5ffaee0f2f 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java
@@ -19,6 +19,8 @@
package org.elasticsearch.cluster.routing;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent {
return shardIdentifier;
}
- public boolean allocatedPostIndexCreate() {
+ public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {
if (active()) {
return true;
}
@@ -279,6 +281,11 @@ public final class ShardRouting implements Streamable, ToXContent {
return false;
}
+ if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
+ // when no shards with this id have ever been active for this index
+ return false;
+ }
+
return true;
}
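allocatedPostIndexCreate now needs the IndexMetaData: for indices created on or after 3.0, an empty active-allocation-id set for the shard id means no copy was ever active, so the shard is still treated as a fresh post-create allocation. A condensed, self-contained sketch of the resulting decision order; the boolean parameters stand in for the calls made in the hunk:

    public class PostCreateCheck {
        // stand-ins for: active(), unassigned-since-create, activeAllocationIds non-empty, creation version >= 3.0
        static boolean allocatedPostIndexCreate(boolean active, boolean unassignedSinceCreate,
                                                boolean hasEverBeenActive, boolean createdOnOrAfter30) {
            if (active) {
                return true;                  // an active shard has clearly been allocated
            }
            if (unassignedSinceCreate) {
                return false;                 // still waiting for its very first allocation
            }
            if (!hasEverBeenActive && createdOnOrAfter30) {
                return false;                 // no copy of this shard id was ever active
            }
            return true;
        }

        public static void main(String[] args) {
            System.out.println(allocatedPostIndexCreate(false, false, false, true)); // false
        }
    }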
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index feafb76a5f..2268bf1d99 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
+import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index b9ce532a61..e6dc9a65ef 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -34,12 +34,13 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.PriorityComparator;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.ArrayList;
import java.util.Collection;
@@ -72,42 +73,32 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
*/
public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
- public static final String SETTING_THRESHOLD = "cluster.routing.allocation.balance.threshold";
- public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index";
- public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard";
-
- private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f;
- private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f;
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance);
- final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance);
- float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold);
- if (threshold <= 0.0f) {
- throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold);
- }
- BalancedShardsAllocator.this.threshold = threshold;
- BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance);
- }
- }
-
- private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR);
-
- private volatile float threshold = 1.0f;
+ public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER);
+ public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER);
+ public static final Setting<Float> THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER);
+ private volatile WeightFunction weightFunction;
+ private volatile float threshold;
public BalancedShardsAllocator(Settings settings) {
- this(settings, new NodeSettingsService(settings));
+ this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
@Inject
- public BalancedShardsAllocator(Settings settings, NodeSettingsService nodeSettingsService) {
+ public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- ApplySettings applySettings = new ApplySettings();
- applySettings.onRefreshSettings(settings);
- nodeSettingsService.addListener(applySettings);
+ setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings));
+ setThreshold(THRESHOLD_SETTING.get(settings));
+ clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction);
+ clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold);
+ }
+
+ private void setWeightFunction(float indexBalance, float shardBalanceFactor) {
+ weightFunction = new WeightFunction(indexBalance, shardBalanceFactor);
+ }
+
+ private void setThreshold(float threshold) {
+ this.threshold = threshold;
}
@Override
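The balance factors move from an ApplySettings listener to registered consumers: seed the fields from the initial Settings, then let ClusterSettings push validated updates. A minimal sketch using only the factory and consumer signatures that appear in this hunk; the component and the example.* keys are illustrative:

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class WeightedComponent {
        // dynamic (true), cluster-scoped float settings
        public static final Setting<Float> INDEX_FACTOR_SETTING =
                Setting.floatSetting("example.balance.index", 0.55f, true, Setting.Scope.CLUSTER);
        public static final Setting<Float> SHARD_FACTOR_SETTING =
                Setting.floatSetting("example.balance.shard", 0.45f, true, Setting.Scope.CLUSTER);

        private volatile float indexFactor;
        private volatile float shardFactor;

        public WeightedComponent(Settings settings, ClusterSettings clusterSettings) {
            // seed from the initial node settings ...
            setFactors(INDEX_FACTOR_SETTING.get(settings), SHARD_FACTOR_SETTING.get(settings));
            // ... then let the cluster-settings service push coordinated updates of both values
            clusterSettings.addSettingsUpdateConsumer(INDEX_FACTOR_SETTING, SHARD_FACTOR_SETTING, this::setFactors);
        }

        private void setFactors(float indexFactor, float shardFactor) {
            this.indexFactor = indexFactor;
            this.shardFactor = shardFactor;
        }
    }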
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
index 6f7bbac8ae..a66c8ddaef 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java
@@ -24,10 +24,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.HashMap;
import java.util.Map;
@@ -76,37 +77,12 @@ public class AwarenessAllocationDecider extends AllocationDecider {
public static final String NAME = "awareness";
- public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES = "cluster.routing.allocation.awareness.attributes";
- public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP = "cluster.routing.allocation.awareness.force.";
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null);
- if (awarenessAttributes == null && "".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) {
- awarenessAttributes = Strings.EMPTY_ARRAY; // the empty string resets this
- }
- if (awarenessAttributes != null) {
- logger.info("updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes);
- AwarenessAllocationDecider.this.awarenessAttributes = awarenessAttributes;
- }
- Map<String, String[]> forcedAwarenessAttributes = new HashMap<>(AwarenessAllocationDecider.this.forcedAwarenessAttributes);
- Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP);
- if (!forceGroups.isEmpty()) {
- for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
- String[] aValues = entry.getValue().getAsArray("values");
- if (aValues.length > 0) {
- forcedAwarenessAttributes.put(entry.getKey(), aValues);
- }
- }
- }
- AwarenessAllocationDecider.this.forcedAwarenessAttributes = forcedAwarenessAttributes;
- }
- }
+    public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray, true, Setting.Scope.CLUSTER);
+ public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER);
private String[] awarenessAttributes;
- private Map<String, String[]> forcedAwarenessAttributes;
+ private volatile Map<String, String[]> forcedAwarenessAttributes;
/**
* Creates a new {@link AwarenessAllocationDecider} instance
@@ -121,24 +97,28 @@ public class AwarenessAllocationDecider extends AllocationDecider {
* @param settings {@link Settings} to use
*/
public AwarenessAllocationDecider(Settings settings) {
- this(settings, new NodeSettingsService(settings));
+ this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
@Inject
- public AwarenessAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- this.awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES);
+ this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes);
+ setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings));
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes);
+ }
- forcedAwarenessAttributes = new HashMap<>();
- Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP);
+ private void setForcedAwarenessAttributes(Settings forceSettings) {
+ Map<String, String[]> forcedAwarenessAttributes = new HashMap<>();
+ Map<String, Settings> forceGroups = forceSettings.getAsGroups();
for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
String[] aValues = entry.getValue().getAsArray("values");
if (aValues.length > 0) {
forcedAwarenessAttributes.put(entry.getKey(), aValues);
}
}
-
- nodeSettingsService.addListener(new ApplySettings());
+ this.forcedAwarenessAttributes = forcedAwarenessAttributes;
}
/**
@@ -150,6 +130,10 @@ public class AwarenessAllocationDecider extends AllocationDecider {
return this.awarenessAttributes;
}
+ private void setAwarenessAttributes(String[] awarenessAttributes) {
+ this.awarenessAttributes = awarenessAttributes;
+ }
+
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return underCapacity(shardRouting, node, allocation, true);
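The awareness attributes become a single typed setting whose parser (Strings::splitStringByCommaToArray) runs inside the setting itself, and the force.* namespace becomes a group setting. A sketch of both constructors as used above, with illustrative example.* keys:

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class AttributeSettings {
        // a comma-separated list, parsed into String[] by the setting itself
        public static final Setting<String[]> ATTRIBUTES_SETTING =
                new Setting<>("example.awareness.attributes", "", Strings::splitStringByCommaToArray, true, Setting.Scope.CLUSTER);

        // every key under the prefix is captured as one Settings object
        public static final Setting<Settings> FORCE_GROUP_SETTING =
                Setting.groupSetting("example.awareness.force.", true, Setting.Scope.CLUSTER);
    }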
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
index 7638c7aeee..b1be2a6fce 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
@@ -19,13 +19,12 @@
package org.elasticsearch.cluster.routing.allocation.decider;
-import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Locale;
@@ -38,10 +37,10 @@ import java.util.Locale;
* <ul>
* <li><tt>indices_primaries_active</tt> - Re-balancing is allowed only once all
* primary shards on all indices are active.</li>
- *
+ *
* <li><tt>indices_all_active</tt> - Re-balancing is allowed only once all
* shards on all indices are active.</li>
- *
+ *
* <li><tt>always</tt> - Re-balancing is allowed once a shard replication group
* is active</li>
* </ul>
@@ -49,19 +48,10 @@ import java.util.Locale;
public class ClusterRebalanceAllocationDecider extends AllocationDecider {
public static final String NAME = "cluster_rebalance";
-
- public static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance";
- public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = (setting, value, clusterState) -> {
- try {
- ClusterRebalanceType.parseString(value);
- return null;
- } catch (IllegalArgumentException e) {
- return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]";
- }
- };
+ public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER);
/**
- * An enum representation for the configured re-balance type.
+ * An enum representation for the configured re-balance type.
*/
public static enum ClusterRebalanceType {
/**
@@ -73,7 +63,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
*/
INDICES_PRIMARIES_ACTIVE,
/**
- * Re-balancing is allowed only once all shards on all indices are active.
+ * Re-balancing is allowed only once all shards on all indices are active.
*/
INDICES_ALL_ACTIVE;
@@ -85,48 +75,28 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
} else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) {
return ClusterRebalanceType.INDICES_ALL_ACTIVE;
}
- throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString);
+ throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
}
}
- private ClusterRebalanceType type;
+ private volatile ClusterRebalanceType type;
@Inject
- public ClusterRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active");
try {
- type = ClusterRebalanceType.parseString(allowRebalance);
+ type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings);
} catch (IllegalStateException e) {
- logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance);
+ logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
}
- logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type.toString().toLowerCase(Locale.ROOT));
+ logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT));
- nodeSettingsService.addListener(new ApplySettings());
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
}
- class ApplySettings implements NodeSettingsService.Listener {
-
- @Override
- public void onRefreshSettings(Settings settings) {
- String newAllowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, null);
- if (newAllowRebalance != null) {
- ClusterRebalanceType newType = null;
- try {
- newType = ClusterRebalanceType.parseString(newAllowRebalance);
- } catch (IllegalArgumentException e) {
- // ignore
- }
-
- if (newType != null && newType != ClusterRebalanceAllocationDecider.this.type) {
- logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
- ClusterRebalanceAllocationDecider.this.type.toString().toLowerCase(Locale.ROOT),
- newType.toString().toLowerCase(Locale.ROOT));
- ClusterRebalanceAllocationDecider.this.type = newType;
- }
- }
- }
+ private void setType(ClusterRebalanceType type) {
+ this.type = type;
}
@Override
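The enum-valued setting folds the old standalone Validator into the setting's parse function: an illegal value now fails inside Setting when it is read or dynamically updated. A self-contained sketch of the same construction; the Mode enum and the example.* key are illustrative:

    import java.util.Locale;

    import org.elasticsearch.common.settings.Setting;

    public class RebalancePolicy {
        public enum Mode {
            ALWAYS, PRIMARIES_ACTIVE, ALL_ACTIVE;

            public static Mode parse(String value) {
                try {
                    return Mode.valueOf(value.toUpperCase(Locale.ROOT));
                } catch (IllegalArgumentException e) {
                    throw new IllegalArgumentException("illegal value for example.rebalance.mode: " + value);
                }
            }
        }

        // the parser runs whenever the setting is read or updated, so bad values
        // are rejected up front; no separate Validator object is needed any more
        public static final Setting<Mode> MODE_SETTING = new Setting<>(
                "example.rebalance.mode", Mode.ALL_ACTIVE.name().toLowerCase(Locale.ROOT), Mode::parse, true, Setting.Scope.CLUSTER);
    }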
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
index 6bd1b437ac..a9ad35fd52 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java
@@ -22,8 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
* Similar to the {@link ClusterRebalanceAllocationDecider} this
@@ -41,27 +42,19 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
public static final String NAME = "concurrent_rebalance";
- public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance";
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance);
- if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) {
- logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance);
- ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance;
- }
- }
- }
-
+ public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER);
private volatile int clusterConcurrentRebalance;
@Inject
- public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2);
+ this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings);
logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
- nodeSettingsService.addListener(new ApplySettings());
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance);
+ }
+
+ private void setClusterConcurrentRebalance(int concurrentRebalance) {
+ clusterConcurrentRebalance = concurrentRebalance;
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index a02c72c574..68fd7f3db9 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -22,26 +22,27 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.hppc.ObjectLookupContainer;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.RatioValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Set;
@@ -80,53 +81,11 @@ public class DiskThresholdDecider extends AllocationDecider {
private volatile boolean enabled;
private volatile TimeValue rerouteInterval;
- public static final String CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED = "cluster.routing.allocation.disk.threshold_enabled";
- public static final String CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.low";
- public static final String CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.high";
- public static final String CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS = "cluster.routing.allocation.disk.include_relocations";
- public static final String CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL = "cluster.routing.allocation.disk.reroute_interval";
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null);
- String newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null);
- Boolean newRelocationsSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, null);
- Boolean newEnableSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
- TimeValue newRerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, null);
-
- if (newEnableSetting != null) {
- logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED,
- DiskThresholdDecider.this.enabled, newEnableSetting);
- DiskThresholdDecider.this.enabled = newEnableSetting;
- }
- if (newRelocationsSetting != null) {
- logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS,
- DiskThresholdDecider.this.includeRelocations, newRelocationsSetting);
- DiskThresholdDecider.this.includeRelocations = newRelocationsSetting;
- }
- if (newLowWatermark != null) {
- if (!validWatermarkSetting(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
- throw new ElasticsearchParseException("unable to parse low watermark [{}]", newLowWatermark);
- }
- logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark);
- DiskThresholdDecider.this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(newLowWatermark);
- DiskThresholdDecider.this.freeBytesThresholdLow = thresholdBytesFromWatermark(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
- }
- if (newHighWatermark != null) {
- if (!validWatermarkSetting(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
- throw new ElasticsearchParseException("unable to parse high watermark [{}]", newHighWatermark);
- }
- logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark);
- DiskThresholdDecider.this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(newHighWatermark);
- DiskThresholdDecider.this.freeBytesThresholdHigh = thresholdBytesFromWatermark(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
- }
- if (newRerouteInterval != null) {
- logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, newRerouteInterval);
- DiskThresholdDecider.this.rerouteInterval = newRerouteInterval;
- }
- }
- }
+ public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER);
+ public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER);
+ public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);
+ public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
/**
* Listens for a node to go over the high watermark and kicks off an empty
@@ -231,38 +190,49 @@ public class DiskThresholdDecider extends AllocationDecider {
// It's okay the Client is null here, because the empty cluster info
// service will never actually call the listener where the client is
// needed. Also this constructor is only used for tests
- this(settings, new NodeSettingsService(settings), EmptyClusterInfoService.INSTANCE, null);
+ this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), EmptyClusterInfoService.INSTANCE, null);
}
@Inject
- public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService, ClusterInfoService infoService, Client client) {
+ public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings, ClusterInfoService infoService, Client client) {
super(settings);
- String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%");
- String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%");
+ final String lowWatermark = CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings);
+ final String highWatermark = CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.get(settings);
+ setHighWatermark(highWatermark);
+ setLowWatermark(lowWatermark);
+ this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings);
+ this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings);
+ this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);
+ infoService.addListener(new DiskListener(client));
+ }
- if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
- throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark);
- }
- if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
- throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark);
- }
- // Watermark is expressed in terms of used data, but we need "free" data watermark
- this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
- this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
+ private void setIncludeRelocations(boolean includeRelocations) {
+ this.includeRelocations = includeRelocations;
+ }
- this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
- this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
- this.includeRelocations = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true);
- this.rerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60));
+ private void setRerouteInterval(TimeValue rerouteInterval) {
+ this.rerouteInterval = rerouteInterval;
+ }
- this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
- nodeSettingsService.addListener(new ApplySettings());
- infoService.addListener(new DiskListener(client));
+ private void setEnabled(boolean enabled) {
+ this.enabled = enabled;
}
- // For Testing
- ApplySettings newApplySettings() {
- return new ApplySettings();
+ private void setLowWatermark(String lowWatermark) {
+ // Watermark is expressed in terms of used data, but we need "free" data watermark
+ this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
+ this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
+ }
+
+ private void setHighWatermark(String highWatermark) {
+ // Watermark is expressed in terms of used data, but we need "free" data watermark
+ this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
+        this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey());
}
// For Testing
@@ -360,7 +330,8 @@ public class DiskThresholdDecider extends AllocationDecider {
}
// a flag for whether the primary shard has been previously allocated
- boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate();
+ IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
+ boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
// checks for exact byte comparisons
if (freeBytes < freeBytesThresholdLow.bytes()) {
@@ -580,20 +551,21 @@ public class DiskThresholdDecider extends AllocationDecider {
/**
* Checks if a watermark string is a valid percentage or byte size value,
- * returning true if valid, false if invalid.
+ * @return the watermark value given
*/
- public boolean validWatermarkSetting(String watermark, String settingName) {
+ public static String validWatermarkSetting(String watermark, String settingName) {
try {
RatioValue.parseRatioValue(watermark);
- return true;
} catch (ElasticsearchParseException e) {
try {
ByteSizeValue.parseBytesSizeValue(watermark, settingName);
- return true;
} catch (ElasticsearchParseException ex) {
- return false;
+ ex.addSuppressed(e);
+ throw ex;
}
}
+ return watermark;
+
}
private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
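validWatermarkSetting is reshaped from a boolean check into a parser that returns the value or throws, so it can serve directly as the watermark Setting's parse function; the first failure is attached via addSuppressed so neither parse error is lost. A standalone sketch of the same shape, where the two numeric parses stand in for RatioValue.parseRatioValue and ByteSizeValue.parseBytesSizeValue:

    public class WatermarkValidator {
        /** @return the watermark given, or throws if it is neither a ratio nor a byte size */
        public static String validWatermark(String watermark) {
            try {
                Double.parseDouble(watermark.replace("%", "")); // stand-in for the ratio parse
            } catch (NumberFormatException ratioFailure) {
                try {
                    Long.parseLong(watermark.replace("b", "")); // stand-in for the byte-size parse
                } catch (NumberFormatException byteSizeFailure) {
                    byteSizeFailure.addSuppressed(ratioFailure); // keep both failure causes
                    throw byteSizeFailure;
                }
            }
            return watermark;
        }

        public static void main(String[] args) {
            System.out.println(validWatermark("85%")); // prints 85%
        }
    }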
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
index 0bbd493504..a31d36db34 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
@@ -19,18 +19,20 @@
package org.elasticsearch.cluster.routing.allocation.decider;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Locale;
/**
- * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE} /
- * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}.
+ * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
+ * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}.
 * The per index setting overrides the cluster wide setting.
*
* <p>
@@ -54,26 +56,34 @@ import java.util.Locale;
* @see Rebalance
* @see Allocation
*/
-public class EnableAllocationDecider extends AllocationDecider implements NodeSettingsService.Listener {
+public class EnableAllocationDecider extends AllocationDecider {
public static final String NAME = "enable";
- public static final String CLUSTER_ROUTING_ALLOCATION_ENABLE = "cluster.routing.allocation.enable";
- public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable";
+ public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
+    public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable";
- public static final String CLUSTER_ROUTING_REBALANCE_ENABLE = "cluster.routing.rebalance.enable";
+ public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable";
private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation;
-
@Inject
- public EnableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- this.enableAllocation = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.ALL.name()));
- this.enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, Rebalance.ALL.name()));
- nodeSettingsService.addListener(this);
+ this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings);
+ this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance);
+ }
+
+ public void setEnableRebalance(Rebalance enableRebalance) {
+ this.enableRebalance = enableRebalance;
+ }
+
+ public void setEnableAllocation(Allocation enableAllocation) {
+ this.enableAllocation = enableAllocation;
}
@Override
@@ -82,8 +92,8 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
}
- Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
- String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE);
+ IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
+ String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
final Allocation enable;
if (enableIndexValue != null) {
enable = Allocation.parse(enableIndexValue);
@@ -96,7 +106,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
case NONE:
return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
case NEW_PRIMARIES:
- if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) {
+ if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");
@@ -148,25 +158,9 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
}
}
- @Override
- public void onRefreshSettings(Settings settings) {
- final Allocation enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation.name()));
- if (enable != this.enableAllocation) {
- logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation, enable);
- EnableAllocationDecider.this.enableAllocation = enable;
- }
-
- final Rebalance enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance.name()));
- if (enableRebalance != this.enableRebalance) {
- logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance, enableRebalance);
- EnableAllocationDecider.this.enableRebalance = enableRebalance;
- }
-
- }
-
/**
 * Allocation values or rather their string representation to be used with
- * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE}
+ * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE}
* via cluster / index settings.
*/
public enum Allocation {
@@ -192,7 +186,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
/**
 * Rebalance values or rather their string representation to be used with
- * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE}
+ * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE}
* via cluster / index settings.
*/
public enum Rebalance {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
index e0e2caaf04..4c451e7fff 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java
@@ -25,10 +25,9 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
-
-import java.util.Map;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@@ -65,36 +64,23 @@ public class FilterAllocationDecider extends AllocationDecider {
public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include.";
public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude.";
- public static final String CLUSTER_ROUTING_REQUIRE_GROUP = "cluster.routing.allocation.require.";
- public static final String CLUSTER_ROUTING_INCLUDE_GROUP = "cluster.routing.allocation.include.";
- public static final String CLUSTER_ROUTING_EXCLUDE_GROUP = "cluster.routing.allocation.exclude.";
+ public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
+ public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
+ public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);
private volatile DiscoveryNodeFilters clusterRequireFilters;
private volatile DiscoveryNodeFilters clusterIncludeFilters;
private volatile DiscoveryNodeFilters clusterExcludeFilters;
@Inject
- public FilterAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- Map<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap();
- if (requireMap.isEmpty()) {
- clusterRequireFilters = null;
- } else {
- clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
- }
- Map<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap();
- if (includeMap.isEmpty()) {
- clusterIncludeFilters = null;
- } else {
- clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
- }
- Map<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap();
- if (excludeMap.isEmpty()) {
- clusterExcludeFilters = null;
- } else {
- clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
- }
- nodeSettingsService.addListener(new ApplySettings());
+ setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings));
+ setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings));
+ setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings));
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters);
}
@Override
@@ -144,21 +130,13 @@ public class FilterAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
}
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- Map<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap();
- if (!requireMap.isEmpty()) {
- clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
- }
- Map<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap();
- if (!includeMap.isEmpty()) {
- clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
- }
- Map<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap();
- if (!excludeMap.isEmpty()) {
- clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
- }
- }
+ private void setClusterRequireFilters(Settings settings) {
+ clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, settings.getAsMap());
+ }
+ private void setClusterIncludeFilters(Settings settings) {
+ clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap());
+ }
+ private void setClusterExcludeFilters(Settings settings) {
+ clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap());
}
}
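Each filter set is rebuilt through its own setter wired to a group setting, replacing the single ApplySettings listener. Note one behavioral nuance visible in the diff: the new setters rebuild filters even from an empty group, where the old listener only reacted to non-empty maps. A sketch of the wiring for one group, with illustrative names; the real code turns the map into DiscoveryNodeFilters:

    import java.util.Map;

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public class FilterHolder {
        public static final Setting<Settings> REQUIRE_GROUP_SETTING =
                Setting.groupSetting("example.routing.require.", true, Setting.Scope.CLUSTER);

        private volatile Map<String, String> requireFilters;

        public FilterHolder(Settings settings, ClusterSettings clusterSettings) {
            setRequireFilters(REQUIRE_GROUP_SETTING.get(settings));
            // one consumer per group setting replaces the single catch-all listener
            clusterSettings.addSettingsUpdateConsumer(REQUIRE_GROUP_SETTING, this::setRequireFilters);
        }

        private void setRequireFilters(Settings group) {
            // the group's keys arrive with the prefix stripped, ready to become filters
            this.requireFilters = group.getAsMap();
        }
    }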
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
index 3d68ed50d2..9149d04cf6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java
@@ -24,16 +24,16 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
* This {@link AllocationDecider} limits the number of shards per node on a per
 * index or node-wide basis. The allocator prevents a single node from holding more
* than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and
- * {@value #CLUSTER_TOTAL_SHARDS_PER_NODE} globally during the allocation
+ * <tt>cluster.routing.allocation.total_shards_per_node</tt> globally during the allocation
 * process. The limits of this decider can be changed in real time via the
* index settings API.
* <p>
@@ -64,26 +64,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
* Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited.
*/
- public static final String CLUSTER_TOTAL_SHARDS_PER_NODE = "cluster.routing.allocation.total_shards_per_node";
+ public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER);
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- Integer newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, null);
-
- if (newClusterLimit != null) {
- logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE,
- ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit);
- ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit;
- }
- }
- }
@Inject
- public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, -1);
- nodeSettingsService.addListener(new ApplySettings());
+ this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit);
+ }
+
+ private void setClusterShardLimit(int clusterShardLimit) {
+ this.clusterShardLimit = clusterShardLimit;
}
@Override
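
For context, a hedged sketch of how the two limits from the javadoc above are expressed in configuration (the index-level key follows the constant referenced there; the builder name is an assumption about this branch):

    // per-index: at most two shards of the index on any single node
    Settings indexSettings = Settings.settingsBuilder()
        .put("index.routing.allocation.total_shards_per_node", 2)
        .build();
    // cluster-wide: the default of -1 means unlimited
    int clusterLimit = ShardsLimitAllocationDecider
        .CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(Settings.EMPTY); // -1
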
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
index 37b9f9f461..597f0add8d 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java
@@ -23,9 +23,10 @@ import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
* This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that
@@ -38,18 +39,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
/**
* Disables relocation of shards that are currently being snapshotted.
*/
- public static final String CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED = "cluster.routing.allocation.snapshot.relocation_enabled";
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- boolean newEnableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation);
- if (newEnableRelocation != enableRelocation) {
- logger.info("updating [{}] from [{}], to [{}]", CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation, newEnableRelocation);
- enableRelocation = newEnableRelocation;
- }
- }
- }
+ public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER);
private volatile boolean enableRelocation = false;
@@ -66,14 +56,18 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
* @param settings {@link org.elasticsearch.common.settings.Settings} to use
*/
public SnapshotInProgressAllocationDecider(Settings settings) {
- this(settings, new NodeSettingsService(settings));
+ this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
}
@Inject
- public SnapshotInProgressAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- enableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation);
- nodeSettingsService.addListener(new ApplySettings());
+ enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation);
+ }
+
+ private void setEnableRelocation(boolean enableRelocation) {
+ this.enableRelocation = enableRelocation;
}
/**
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index ed6814d83a..b97e613867 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -19,13 +19,14 @@
package org.elasticsearch.cluster.routing.allocation.decider;
+import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
* {@link ThrottlingAllocationDecider} controls the recovery process per node in
@@ -47,27 +48,33 @@ import org.elasticsearch.node.settings.NodeSettingsService;
*/
public class ThrottlingAllocationDecider extends AllocationDecider {
+ public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
+ public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
public static final String NAME = "throttling";
-
- public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries";
- public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries";
public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries";
- public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
- public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
+ public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
+ public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
private volatile int primariesInitialRecoveries;
private volatile int concurrentRecoveries;
@Inject
- public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+ public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
-
- this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES);
- this.concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES));
+ this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings);
+ this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings);
logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries);
+ clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries);
+ }
- nodeSettingsService.addListener(new ApplySettings());
+ private void setConcurrentRecoveries(int concurrentRecoveries) {
+ this.concurrentRecoveries = concurrentRecoveries;
+ }
+
+ private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) {
+ this.primariesInitialRecoveries = primariesInitialRecoveries;
}
@Override
@@ -115,21 +122,4 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries);
}
}
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries);
- if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) {
- logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries);
- ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries;
- }
-
- int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries);
- if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) {
- logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries);
- ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries;
- }
- }
- }
}
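
Note how node_concurrent_recoveries is declared with a default function rather than a constant: it consults the legacy concurrent_recoveries key first, so existing configurations keep their value. A sketch of the resolution order (builder name assumed as above):

    // only the legacy key is configured ...
    Settings s = Settings.settingsBuilder()
        .put("cluster.routing.allocation.concurrent_recoveries", 5)
        .build();
    // ... yet the new setting resolves to 5, because its default function
    // falls back to the legacy key before the hard-coded default of 2
    int concurrent = ThrottlingAllocationDecider
        .CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(s);
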
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
index d4b1586184..5fc013b663 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java
@@ -20,8 +20,19 @@
package org.elasticsearch.cluster.service;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.AckedClusterStateTaskListener;
+import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterState.Builder;
+import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.ClusterStateTaskConfig;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
+import org.elasticsearch.cluster.ClusterStateTaskListener;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.LocalNodeMasterListener;
+import org.elasticsearch.cluster.TimeoutClusterStateListener;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -38,20 +49,39 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.*;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
+import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
-import java.util.*;
-import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
@@ -62,8 +92,8 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
*/
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
- public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold";
- public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval";
+ public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
+ public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
private final ThreadPool threadPool;
@@ -74,7 +104,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
private final TransportService transportService;
- private final NodeSettingsService nodeSettingsService;
+ private final ClusterSettings clusterSettings;
private final DiscoveryNodeService discoveryNodeService;
private final Version version;
@@ -107,33 +137,32 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
@Inject
public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService,
- NodeSettingsService nodeSettingsService, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
+ ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
super(settings);
this.operationRouting = operationRouting;
this.transportService = transportService;
this.discoveryService = discoveryService;
this.threadPool = threadPool;
- this.nodeSettingsService = nodeSettingsService;
+ this.clusterSettings = clusterSettings;
this.discoveryNodeService = discoveryNodeService;
this.version = version;
// will be replaced on doStart.
this.clusterState = ClusterState.builder(clusterName).build();
- this.nodeSettingsService.setClusterService(this);
- this.nodeSettingsService.addListener(new ApplySettings());
+ this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold);
- this.reconnectInterval = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL, TimeValue.timeValueSeconds(10));
+ this.reconnectInterval = CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING.get(settings);
- this.slowTaskLoggingThreshold = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, TimeValue.timeValueSeconds(30));
+ this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock());
}
- public NodeSettingsService settingsService() {
- return this.nodeSettingsService;
+ private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) {
+ this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
}
@Override
@@ -292,6 +321,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
if (config.timeout() != null) {
updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
if (updateTask.processed.getAndSet(true) == false) {
+ logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout());
listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
}}));
} else {
@@ -327,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
timeInQueue = 0;
}
- pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue, pending.executing));
+ pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new Text(source), timeInQueue, pending.executing));
}
return pendingClusterTasks;
}
@@ -413,6 +443,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
}
assert batchResult.executionResults != null;
+ assert batchResult.executionResults.size() == toExecute.size()
+ : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size());
+ boolean assertsEnabled = false;
+ assert (assertsEnabled = true);
+ if (assertsEnabled) {
+ for (UpdateTask<T> updateTask : toExecute) {
+ assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]";
+ }
+ }
ClusterState newClusterState = batchResult.resultingState;
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
@@ -421,7 +460,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
final ClusterStateTaskExecutor.TaskResult executionResult =
batchResult.executionResults.get(updateTask.task);
- executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex));
+ executionResult.handle(
+ () -> proccessedListeners.add(updateTask),
+ ex -> {
+ logger.debug("cluster state update task [{}] failed", ex, updateTask.source);
+ updateTask.listener.onFailure(updateTask.source, ex);
+ }
+ );
}
if (previousClusterState == newClusterState) {
@@ -521,6 +566,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
// update the current cluster state
clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version());
+ try {
+ // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistence
+ if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) {
+ final Settings incomingSettings = clusterChangedEvent.state().metaData().settings();
+ clusterSettings.applySettings(incomingSettings);
+ }
+ } catch (Exception ex) {
+ logger.warn("failed to apply cluster settings", ex);
+ }
for (ClusterStateListener listener : preAppliedListeners) {
try {
listener.clusterChanged(clusterChangedEvent);
@@ -560,6 +614,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
}
+ executor.clusterStatePublished(newClusterState);
+
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source);
@@ -846,12 +902,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
}
}
}
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- final TimeValue slowTaskLoggingThreshold = settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, InternalClusterService.this.slowTaskLoggingThreshold);
- InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
- }
- }
}
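
The executionResults verification added in this file leans on a standard Java idiom to keep the O(n) check out of production: an assignment placed inside an assert is only evaluated when the JVM runs with -ea. A standalone sketch (tasks and results are hypothetical stand-ins):

    boolean assertsEnabled = false;
    // evaluated only when assertions are enabled, so assertsEnabled
    // stays false in production and the loop below is skipped entirely
    assert (assertsEnabled = true);
    if (assertsEnabled) {
        for (Object task : tasks) {
            assert results.containsKey(task) : "missing result for [" + task + "]";
        }
    }
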
diff --git a/core/src/main/java/org/elasticsearch/common/Booleans.java b/core/src/main/java/org/elasticsearch/common/Booleans.java
index 6b1b9b016a..9c5f574663 100644
--- a/core/src/main/java/org/elasticsearch/common/Booleans.java
+++ b/core/src/main/java/org/elasticsearch/common/Booleans.java
@@ -84,7 +84,6 @@ public class Booleans {
* throws exception if string cannot be parsed to boolean
*/
public static Boolean parseBooleanExact(String value) {
-
boolean isFalse = isExplicitFalse(value);
if (isFalse) {
return false;
@@ -94,7 +93,7 @@ public class Booleans {
return true;
}
- throw new IllegalArgumentException("value cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ] ");
+ throw new IllegalArgumentException("Failed to parse value [" + value + "] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]");
}
public static Boolean parseBoolean(String value, Boolean defaultValue) {
diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java
new file mode 100644
index 0000000000..dbfa8034b9
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/Randomness.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.common.settings.Settings;
+
+import java.lang.reflect.Method;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Provides factory methods for producing reproducible sources of
+ * randomness. Reproducible sources of randomness contribute to
+ * reproducible tests. When running the Elasticsearch test suite, the
+ * test runner will establish a global random seed accessible via the
+ * system property "tests.seed". By seeding a random number generator
+ * with this global seed, we ensure that instances of Random produced
+ * with this class produce reproducible sources of randomness when
+ * running under the Elasticsearch test suite. Alternatively, a
+ * reproducible source of randomness can be produced by providing a
+ * reproducible seed via a setting. When running the Elasticsearch server
+ * process, non-reproducible sources of randomness are provided (unless
+ * a setting is provided for a module that exposes a seed setting (e.g.,
+ * DiscoveryService#SETTING_DISCOVERY_SEED)).
+ */
+public final class Randomness {
+ private static final Method currentMethod;
+ private static final Method getRandomMethod;
+
+ static {
+ Method maybeCurrentMethod;
+ Method maybeGetRandomMethod;
+ try {
+ Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext");
+ maybeCurrentMethod = clazz.getMethod("current");
+ maybeGetRandomMethod = clazz.getMethod("getRandom");
+ } catch (Throwable t) {
+ maybeCurrentMethod = null;
+ maybeGetRandomMethod = null;
+ }
+ currentMethod = maybeCurrentMethod;
+ getRandomMethod = maybeGetRandomMethod;
+ }
+
+ private Randomness() {}
+
+ /**
+ * Provides a reproducible source of randomness seeded by a long
+ * value read from the settings under the given key.
+ *
+ * @param settings the settings containing the seed
+ * @param setting the key to access the seed
+ * @return a reproducible source of randomness
+ */
+ public static Random get(Settings settings, String setting) {
+ Long maybeSeed = settings.getAsLong(setting, null);
+ if (maybeSeed != null) {
+ return new Random(maybeSeed);
+ } else {
+ return get();
+ }
+ }
+
+ /**
+ * Provides a source of randomness that is reproducible when
+ * running under the Elasticsearch test suite, and otherwise
+ * produces a non-reproducible source of randomness. Reproducible
+ * sources of randomness are created when the system property
+ * "tests.seed" is set and the security policy allows reading this
+ * system property. Otherwise, non-reproducible sources of
+ * randomness are created.
+ *
+ * @return a source of randomness
+ * @throws IllegalStateException if running tests but was not able
+ * to acquire an instance of Random from
+ * RandomizedContext or tests are
+ * running but tests.seed is not set
+ */
+ public static Random get() {
+ if (currentMethod != null && getRandomMethod != null) {
+ try {
+ Object randomizedContext = currentMethod.invoke(null);
+ return (Random) getRandomMethod.invoke(randomizedContext);
+ } catch (ReflectiveOperationException e) {
+ // unexpected, bail
+ throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e);
+ }
+ } else {
+ return getWithoutSeed();
+ }
+ }
+
+ private static Random getWithoutSeed() {
+ assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
+ return ThreadLocalRandom.current();
+ }
+
+ public static void shuffle(List<?> list) {
+ Collections.shuffle(list, get());
+ }
+}
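
Typical use of the new class, as a hedged sketch given a Settings instance (the seed key below is hypothetical; DiscoveryService#SETTING_DISCOVERY_SEED in the javadoc is a real example of a module exposing one):

    List<String> nodes = new ArrayList<>(Arrays.asList("n1", "n2", "n3"));
    // reproducible when "my.module.seed" is set, or when running under the
    // test suite; otherwise backed by ThreadLocalRandom
    Random random = Randomness.get(settings, "my.module.seed");
    String choice = nodes.get(random.nextInt(nodes.size()));
    Randomness.shuffle(nodes); // same source-selection logic as get()
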
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
index 62f29d2bad..afcf899051 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
@@ -25,26 +25,17 @@ import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
-
import java.io.IOException;
-import java.util.Locale;
import java.util.Objects;
public class EnvelopeBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
- public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder();
-
- protected Coordinate topLeft;
- protected Coordinate bottomRight;
- public EnvelopeBuilder() {
- this(Orientation.RIGHT);
- }
+ public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder();
- public EnvelopeBuilder(Orientation orientation) {
- super(orientation);
- }
+ private Coordinate topLeft;
+ private Coordinate bottomRight;
public EnvelopeBuilder topLeft(Coordinate topLeft) {
this.topLeft = topLeft;
@@ -55,6 +46,10 @@ public class EnvelopeBuilder extends ShapeBuilder {
return topLeft(coordinate(longitude, latitude));
}
+ public Coordinate topLeft() {
+ return this.topLeft;
+ }
+
public EnvelopeBuilder bottomRight(Coordinate bottomRight) {
this.bottomRight = bottomRight;
return this;
@@ -64,11 +59,14 @@ public class EnvelopeBuilder extends ShapeBuilder {
return bottomRight(coordinate(longitude, latitude));
}
+ public Coordinate bottomRight() {
+ return this.bottomRight;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapeName());
- builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
builder.startArray(FIELD_COORDINATES);
toXContent(builder, topLeft);
toXContent(builder, bottomRight);
@@ -88,7 +86,7 @@ public class EnvelopeBuilder extends ShapeBuilder {
@Override
public int hashCode() {
- return Objects.hash(orientation, topLeft, bottomRight);
+ return Objects.hash(topLeft, bottomRight);
}
@Override
@@ -100,22 +98,19 @@ public class EnvelopeBuilder extends ShapeBuilder {
return false;
}
EnvelopeBuilder other = (EnvelopeBuilder) obj;
- return Objects.equals(orientation, other.orientation) &&
- Objects.equals(topLeft, other.topLeft) &&
+ return Objects.equals(topLeft, other.topLeft) &&
Objects.equals(bottomRight, other.bottomRight);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
- out.writeBoolean(orientation == Orientation.RIGHT);
writeCoordinateTo(topLeft, out);
writeCoordinateTo(bottomRight, out);
}
@Override
public EnvelopeBuilder readFrom(StreamInput in) throws IOException {
- Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT;
- return new EnvelopeBuilder(orientation)
+ return new EnvelopeBuilder()
.topLeft(readCoordinateFrom(in))
.bottomRight(readCoordinateFrom(in));
}
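
With orientation dropped from the wire format, an envelope round-trips as just its two corner coordinates. A hedged round-trip sketch in the style of this codebase's stream tests (BytesStreamOutput and StreamInput.wrap are assumptions about the test idiom on this branch):

    EnvelopeBuilder original = new EnvelopeBuilder()
        .topLeft(-10, 10)
        .bottomRight(10, -10);
    BytesStreamOutput out = new BytesStreamOutput();
    original.writeTo(out); // topLeft then bottomRight, no orientation bit
    EnvelopeBuilder copy = EnvelopeBuilder.PROTOTYPE
        .readFrom(StreamInput.wrap(out.bytes()));
    assert copy.equals(original);
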
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
index 45397ed962..067cd014c0 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
@@ -20,27 +20,25 @@
package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Shape;
+
import org.elasticsearch.common.geo.XShapeCollection;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
public class GeometryCollectionBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION;
- protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();
-
- public GeometryCollectionBuilder() {
- this(Orientation.RIGHT);
- }
+ public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
- public GeometryCollectionBuilder(Orientation orientation) {
- super(orientation);
- }
+ protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();
public GeometryCollectionBuilder shape(ShapeBuilder shape) {
this.shapes.add(shape);
@@ -132,4 +130,39 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
//note: ShapeCollection is probably faster than a Multi* geom.
}
+ @Override
+ public int hashCode() {
+ return Objects.hash(shapes);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj;
+ return Objects.equals(shapes, other.shapes);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(shapes.size());
+ for (ShapeBuilder shape : shapes) {
+ out.writeShape(shape);
+ }
+ }
+
+ @Override
+ public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException {
+ GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder();
+ int shapes = in.readVInt();
+ for (int i = 0; i < shapes; i++) {
+ geometryCollectionBuilder.shape(in.readShape());
+ }
+ return geometryCollectionBuilder;
+ }
+
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
index c7ba9b72f5..464d72c8d8 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
@@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Objects;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
+
import com.spatial4j.core.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
@@ -34,6 +38,8 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
+ public static final LineStringBuilder PROTOTYPE = new LineStringBuilder();
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -139,4 +145,39 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
}
return coordinates;
}
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(points);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ LineStringBuilder other = (LineStringBuilder) obj;
+ return Objects.equals(points, other.points);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(points.size());
+ for (Coordinate point : points) {
+ writeCoordinateTo(point, out);
+ }
+ }
+
+ @Override
+ public LineStringBuilder readFrom(StreamInput in) throws IOException {
+ LineStringBuilder lineStringBuilder = new LineStringBuilder();
+ int size = in.readVInt();
+ for (int i=0; i < size; i++) {
+ lineStringBuilder.point(readCoordinateFrom(in));
+ }
+ return lineStringBuilder;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
index a004b90a2d..4703ac19b0 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
@@ -19,6 +19,8 @@
package org.elasticsearch.common.geo.builders;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import com.spatial4j.core.shape.Shape;
@@ -29,11 +31,14 @@ import com.vividsolutions.jts.geom.LineString;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
+import java.util.Objects;
public class MultiLineStringBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;
+ public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
+
private final ArrayList<LineStringBuilder> lines = new ArrayList<>();
public MultiLineStringBuilder linestring(LineStringBuilder line) {
@@ -41,6 +46,10 @@ public class MultiLineStringBuilder extends ShapeBuilder {
return this;
}
+ public MultiLineStringBuilder linestring(Coordinate[] coordinates) {
+ return this.linestring(new LineStringBuilder().points(coordinates));
+ }
+
public Coordinate[][] coordinates() {
Coordinate[][] result = new Coordinate[lines.size()][];
for (int i = 0; i < result.length; i++) {
@@ -92,4 +101,39 @@ public class MultiLineStringBuilder extends ShapeBuilder {
}
return jtsGeometry(geometry);
}
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(lines);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ MultiLineStringBuilder other = (MultiLineStringBuilder) obj;
+ return Objects.equals(lines, other.lines);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(lines.size());
+ for (LineStringBuilder line : lines) {
+ line.writeTo(out);
+ }
+ }
+
+ @Override
+ public MultiLineStringBuilder readFrom(StreamInput in) throws IOException {
+ MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder();
+ int size = in.readVInt();
+ for (int i = 0; i < size; i++) {
+ multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in));
+ }
+ return multiLineStringBuilder;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
index 8d5cfabdab..a4d236e355 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
@@ -22,18 +22,22 @@ package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
+
import org.elasticsearch.common.geo.XShapeCollection;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
-
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;
+ public static final MultiPointBuilder PROTOTYPE = new MultiPointBuilder();
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -52,7 +56,7 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
for (Coordinate coord : points) {
shapes.add(SPATIAL_CONTEXT.makePoint(coord.x, coord.y));
}
- XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
+ XShapeCollection<Point> multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
multiPoints.setPointsOnly(true);
return multiPoints;
}
@@ -61,4 +65,39 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
public GeoShapeType type() {
return TYPE;
}
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(points);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ MultiPointBuilder other = (MultiPointBuilder) obj;
+ return Objects.equals(points, other.points);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(points.size());
+ for (Coordinate point : points) {
+ writeCoordinateTo(point, out);
+ }
+ }
+
+ @Override
+ public MultiPointBuilder readFrom(StreamInput in) throws IOException {
+ MultiPointBuilder multiPointBuilder = new MultiPointBuilder();
+ int size = in.readVInt();
+ for (int i=0; i < size; i++) {
+ multiPointBuilder.point(readCoordinateFrom(in));
+ }
+ return multiPointBuilder;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
index e7762e51b6..2f9d595c9c 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
@@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.Locale;
+import java.util.Objects;
import org.elasticsearch.common.geo.XShapeCollection;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import com.spatial4j.core.shape.Shape;
@@ -32,26 +36,50 @@ import com.vividsolutions.jts.geom.Coordinate;
public class MultiPolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
+ public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
- protected final ArrayList<PolygonBuilder> polygons = new ArrayList<>();
+ private final ArrayList<PolygonBuilder> polygons = new ArrayList<>();
+
+ private Orientation orientation = Orientation.RIGHT;
public MultiPolygonBuilder() {
this(Orientation.RIGHT);
}
public MultiPolygonBuilder(Orientation orientation) {
- super(orientation);
+ this.orientation = orientation;
+ }
+
+ public Orientation orientation() {
+ return this.orientation;
}
+ /**
+ * Add a shallow copy of the polygon to the multipolygon. This will apply the orientation of the
+ * {@link MultiPolygonBuilder} to the polygon if the polygon has a different orientation.
+ */
public MultiPolygonBuilder polygon(PolygonBuilder polygon) {
- this.polygons.add(polygon);
+ PolygonBuilder pb = new PolygonBuilder(this.orientation);
+ pb.points(polygon.shell().coordinates(false));
+ for (LineStringBuilder hole : polygon.holes()) {
+ pb.hole(hole);
+ }
+ this.polygons.add(pb);
return this;
}
+ /**
+ * get the list of polygons
+ */
+ public ArrayList<PolygonBuilder> polygons() {
+ return polygons;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapeName());
+ builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
builder.startArray(FIELD_COORDINATES);
for(PolygonBuilder polygon : polygons) {
builder.startArray();
@@ -89,4 +117,41 @@ public class MultiPolygonBuilder extends ShapeBuilder {
return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
//note: ShapeCollection is probably faster than a Multi* geom.
}
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(polygons, orientation);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ MultiPolygonBuilder other = (MultiPolygonBuilder) obj;
+ return Objects.equals(polygons, other.polygons) &&
+ Objects.equals(orientation, other.orientation);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ orientation.writeTo(out);
+ out.writeVInt(polygons.size());
+ for (PolygonBuilder polygon : polygons) {
+ polygon.writeTo(out);
+ }
+ }
+
+ @Override
+ public MultiPolygonBuilder readFrom(StreamInput in) throws IOException {
+ MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in));
+ int polygons = in.readVInt(); // count of polygons, not holes
+ for (int i = 0; i < polygons; i++) {
+ polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in));
+ }
+ return polyBuilder;
+ }
}
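
Because polygon(PolygonBuilder) stores a rebuilt copy, a multipolygon imposes its own winding on every member while leaving the caller's builder untouched. A small sketch (coordinates illustrative):

    MultiPolygonBuilder multi = new MultiPolygonBuilder(ShapeBuilder.Orientation.LEFT);
    PolygonBuilder ccw = new PolygonBuilder() // defaults to Orientation.RIGHT
        .point(-10, -10).point(10, -10).point(10, 10).point(-10, 10)
        .close();
    multi.polygon(ccw);
    // the stored copy carries the multipolygon's LEFT orientation
    assert multi.polygons().get(0).orientation() == ShapeBuilder.Orientation.LEFT;
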
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
index d6d62c28b8..3522546165 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
@@ -32,7 +32,6 @@ import com.vividsolutions.jts.geom.Coordinate;
public class PointBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POINT;
-
public static final PointBuilder PROTOTYPE = new PointBuilder();
private Coordinate coordinate;
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
index 04540df27e..03ff6a6b89 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -29,6 +29,8 @@ import com.vividsolutions.jts.geom.MultiPolygon;
import com.vividsolutions.jts.geom.Polygon;
import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -39,6 +41,9 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.List;
+import java.util.Locale;
+import java.util.Objects;
/**
* The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
@@ -48,6 +53,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class PolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
+ public static final PolygonBuilder PROTOTYPE = new PolygonBuilder();
+
+ private static final Coordinate[][] EMPTY = new Coordinate[0][];
+
+ private Orientation orientation = Orientation.RIGHT;
// line string defining the shell of the polygon
private LineStringBuilder shell;
@@ -56,7 +66,7 @@ public class PolygonBuilder extends ShapeBuilder {
private final ArrayList<LineStringBuilder> holes = new ArrayList<>();
public PolygonBuilder() {
- this(new ArrayList<Coordinate>(), Orientation.RIGHT);
+ this(Orientation.RIGHT);
}
public PolygonBuilder(Orientation orientation) {
@@ -64,10 +74,14 @@ public class PolygonBuilder extends ShapeBuilder {
}
public PolygonBuilder(ArrayList<Coordinate> points, Orientation orientation) {
- super(orientation);
+ this.orientation = orientation;
this.shell = new LineStringBuilder().points(points);
}
+ public Orientation orientation() {
+ return this.orientation;
+ }
+
public PolygonBuilder point(double longitude, double latitude) {
shell.point(longitude, latitude);
return this;
@@ -104,6 +118,20 @@ public class PolygonBuilder extends ShapeBuilder {
}
/**
+ * @return the list of holes defined for this polygon
+ */
+ public List<LineStringBuilder> holes() {
+ return this.holes;
+ }
+
+ /**
+ * @return the list of points of the shell for this polygon
+ */
+ public LineStringBuilder shell() {
+ return this.shell;
+ }
+
+ /**
* Close the shell of the polygon
*/
public PolygonBuilder close() {
@@ -175,6 +203,7 @@ public class PolygonBuilder extends ShapeBuilder {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapeName());
+ builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
builder.startArray(FIELD_COORDINATES);
coordinatesArray(builder, params);
builder.endArray();
@@ -357,8 +386,6 @@ public class PolygonBuilder extends ShapeBuilder {
return result;
}
- private static final Coordinate[][] EMPTY = new Coordinate[0][];
-
private static Coordinate[][] holes(Edge[] holes, int numHoles) {
if (numHoles == 0) {
return EMPTY;
@@ -663,4 +690,44 @@ public class PolygonBuilder extends ShapeBuilder {
}
}
}
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(shell, holes, orientation);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ PolygonBuilder other = (PolygonBuilder) obj;
+ return Objects.equals(shell, other.shell) &&
+ Objects.equals(holes, other.holes) &&
+ Objects.equals(orientation, other.orientation);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ orientation.writeTo(out);
+ shell.writeTo(out);
+ out.writeVInt(holes.size());
+ for (LineStringBuilder hole : holes) {
+ hole.writeTo(out);
+ }
+ }
+
+ @Override
+ public PolygonBuilder readFrom(StreamInput in) throws IOException {
+ PolygonBuilder polyBuilder = new PolygonBuilder(Orientation.readFrom(in));
+ polyBuilder.shell = LineStringBuilder.PROTOTYPE.readFrom(in);
+ int holes = in.readVInt();
+ for (int i = 0; i < holes; i++) {
+ polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in));
+ }
+ return polyBuilder;
+ }
}
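
The wire layout above is orientation, then the shell, then a vint-counted list of holes. A construction sketch exercising the new shell() and holes() accessors (coordinates illustrative; point(longitude, latitude) is assumed from PointCollection):

    PolygonBuilder polygon = new PolygonBuilder(ShapeBuilder.Orientation.RIGHT)
        .point(-10, -10).point(10, -10).point(10, 10).point(-10, 10)
        .close(); // closes the shell ring
    polygon.hole(new LineStringBuilder()
        .point(-5, -5).point(5, -5).point(5, 5).point(-5, 5).point(-5, -5));
    assert polygon.holes().size() == 1;
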
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
index d8689ee737..fcd8177ac6 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -77,16 +77,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
/** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */
protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it.
- protected Orientation orientation = Orientation.RIGHT;
-
protected ShapeBuilder() {
}
- protected ShapeBuilder(Orientation orientation) {
- this.orientation = orientation;
- }
-
protected static Coordinate coordinate(double longitude, double latitude) {
return new Coordinate(longitude, latitude);
}
@@ -186,22 +180,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
return new Coordinate(in.readDouble(), in.readDouble());
}
- public static Orientation orientationFromString(String orientation) {
- orientation = orientation.toLowerCase(Locale.ROOT);
- switch (orientation) {
- case "right":
- case "counterclockwise":
- case "ccw":
- return Orientation.RIGHT;
- case "left":
- case "clockwise":
- case "cw":
- return Orientation.LEFT;
- default:
- throw new IllegalArgumentException("Unknown orientation [" + orientation + "]");
- }
- }
-
protected static Coordinate shift(Coordinate coordinate, double dateline) {
if (dateline == 0) {
return coordinate;
@@ -408,6 +386,30 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
public static final Orientation COUNTER_CLOCKWISE = Orientation.RIGHT;
public static final Orientation CW = Orientation.LEFT;
public static final Orientation CCW = Orientation.RIGHT;
+
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeBoolean(this == Orientation.RIGHT);
+ }
+
+ public static Orientation readFrom(StreamInput in) throws IOException {
+ return in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT;
+ }
+
+ public static Orientation fromString(String orientation) {
+ orientation = orientation.toLowerCase(Locale.ROOT);
+ switch (orientation) {
+ case "right":
+ case "counterclockwise":
+ case "ccw":
+ return Orientation.RIGHT;
+ case "left":
+ case "clockwise":
+ case "cw":
+ return Orientation.LEFT;
+ default:
+ throw new IllegalArgumentException("Unknown orientation [" + orientation + "]");
+ }
+ }
}
public static final String FIELD_TYPE = "type";
@@ -498,7 +500,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
radius = Distance.parseDistance(parser.text());
} else if (FIELD_ORIENTATION.equals(fieldName)) {
parser.nextToken();
- requestedOrientation = orientationFromString(parser.text());
+ requestedOrientation = Orientation.fromString(parser.text());
} else {
parser.nextToken();
parser.skipChildren();
@@ -524,7 +526,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
case POLYGON: return parsePolygon(node, requestedOrientation, coerce);
case MULTIPOLYGON: return parseMultiPolygon(node, requestedOrientation, coerce);
case CIRCLE: return parseCircle(node, radius);
- case ENVELOPE: return parseEnvelope(node, requestedOrientation);
+ case ENVELOPE: return parseEnvelope(node);
case GEOMETRYCOLLECTION: return geometryCollections;
default:
throw new ElasticsearchParseException("shape type [{}] not included", shapeType);
@@ -550,7 +552,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius);
}
- protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates, final Orientation orientation) {
+ protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) {
// validate the coordinate array for envelope type
if (coordinates.children.size() != 2) {
throw new ElasticsearchParseException("invalid number of points [{}] provided for " +
@@ -564,7 +566,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y));
lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y));
}
- return ShapeBuilders.newEnvelope(orientation).topLeft(uL).bottomRight(lR);
+ return ShapeBuilders.newEnvelope().topLeft(uL).bottomRight(lR);
}
protected static void validateMultiPointNode(CoordinateNode coordinates) {
@@ -684,8 +686,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
}
XContentParser.Token token = parser.nextToken();
- GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection( (mapper == null) ? Orientation.RIGHT : mapper
- .fieldType().orientation());
+ GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection();
while (token != XContentParser.Token.END_ARRAY) {
ShapeBuilder shapeBuilder = GeoShapeType.parse(parser);
geometryCollection.shape(shapeBuilder);
@@ -700,15 +701,4 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWriteable<ShapeBuilder> {
public String getWriteableName() {
return type().shapeName();
}
-
- // NORELEASE this should be deleted as soon as all shape builders implement writable
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- }
-
- // NORELEASE this should be deleted as soon as all shape builders implement writable
- @Override
- public ShapeBuilder readFrom(StreamInput in) throws IOException {
- return null;
- }
}
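
The new Orientation helpers above encode the enum as a single boolean on the wire and accept several spelling aliases when parsing. A minimal round-trip sketch, not part of the patch, assuming BytesStreamOutput and StreamInput.wrap as available in this era of the codebase:

    BytesStreamOutput out = new BytesStreamOutput();
    // "ccw", "counterclockwise" and "right" all map to Orientation.RIGHT
    ShapeBuilder.Orientation.fromString("ccw").writeTo(out);   // writes boolean true
    ShapeBuilder.Orientation orientation =
            ShapeBuilder.Orientation.readFrom(StreamInput.wrap(out.bytes()));
    assert orientation == ShapeBuilder.Orientation.RIGHT;
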
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java
new file mode 100644
index 0000000000..c66e969aa3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import org.elasticsearch.common.geo.ShapesAvailability;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+
+/**
+ * Register the shape builder prototypes with the {@link NamedWriteableRegistry}
+ */
+public class ShapeBuilderRegistry {
+
+ @Inject
+ public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) {
+ if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
+ namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
+ }
+ }
+}
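
ShapeBuilderRegistry only wires up prototypes; lookup happens later, when a stream reads a named writeable by category and name. A hedged sketch of that flow (constructing the registry by hand here is illustrative, in the node it is injected):

    // registers every builder prototype once under its shape name
    NamedWriteableRegistry registry = new NamedWriteableRegistry();
    new ShapeBuilderRegistry(registry);
    // a stream bound to this registry can now resolve ShapeBuilder.class plus
    // the shape name to the matching PROTOTYPE and call its readFrom
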
diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java
index e294a9d6ef..61d7a9cd07 100644
--- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java
+++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java
@@ -111,15 +111,6 @@ public class ShapeBuilders {
}
/**
- * Create a new GeometryCollection
- *
- * @return a new {@link GeometryCollectionBuilder}
- */
- public static GeometryCollectionBuilder newGeometryCollection(ShapeBuilder.Orientation orientation) {
- return new GeometryCollectionBuilder(orientation);
- }
-
- /**
* create a new Circle
*
* @return a new {@link CircleBuilder}
@@ -136,13 +127,4 @@ public class ShapeBuilders {
public static EnvelopeBuilder newEnvelope() {
return new EnvelopeBuilder();
}
-
- /**
- * create a new rectangle
- *
- * @return a new {@link EnvelopeBuilder}
- */
- public static EnvelopeBuilder newEnvelope(ShapeBuilder.Orientation orientation) {
- return new EnvelopeBuilder(orientation);
- }
}
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index 20859e2716..ffcb4201f4 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
@@ -256,13 +256,13 @@ public abstract class StreamInput extends InputStream {
if (length == -1) {
return null;
}
- return new StringAndBytesText(readBytesReference(length));
+ return new Text(readBytesReference(length));
}
public Text readText() throws IOException {
// use Text so we can cache the string if it's ever converted to one
int length = readInt();
- return new StringAndBytesText(readBytesReference(length));
+ return new Text(readBytesReference(length));
}
@Nullable
@@ -630,6 +630,13 @@ public abstract class StreamInput extends InputStream {
}
/**
+ * Reads a {@link ShapeBuilder} from the current stream
+ */
+ public ShapeBuilder readShape() throws IOException {
+ return readNamedWriteable(ShapeBuilder.class);
+ }
+
+ /**
* Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream
*/
public ScoreFunctionBuilder<?> readScoreFunction() throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
index 5f1e7623d2..e8997b8073 100644
--- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
+++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java
@@ -32,6 +32,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
@@ -619,6 +620,13 @@ public abstract class StreamOutput extends OutputStream {
}
/**
+ * Writes a {@link ShapeBuilder} to the current stream
+ */
+ public void writeShape(ShapeBuilder shapeBuilder) throws IOException {
+ writeNamedWriteable(shapeBuilder);
+ }
+
+ /**
* Writes a {@link ScoreFunctionBuilder} to the current stream
*/
public void writeScoreFunction(ScoreFunctionBuilder<?> scoreFunctionBuilder) throws IOException {
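
Together with readShape above, writeShape gives shapes a symmetric wire format via the named-writeable mechanism. A minimal round-trip sketch, assuming ShapeBuilders.newPoint exists in this revision and using the NamedWriteableAwareStreamInput wrapper so the input can see the registry:

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeShape(ShapeBuilders.newPoint(13.4, 52.5));   // writes name + shape payload

    NamedWriteableRegistry registry = new NamedWriteableRegistry();
    new ShapeBuilderRegistry(registry);
    StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes()), registry);
    ShapeBuilder shape = in.readShape();                  // resolves PointBuilder.PROTOTYPE
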
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
index 7191c96e33..4fe90aed9e 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java
@@ -149,6 +149,10 @@ public final class AllTermQuery extends Query {
return null;
}
final TermState state = termStates.get(context.ord);
+ if (state == null) {
+ // Term does not exist in this segment
+ return null;
+ }
termsEnum.seekExact(term.bytes(), state);
PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS);
assert docs != null;
diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
index c1f282ac23..f7eab3da2a 100644
--- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
+++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java
@@ -19,21 +19,362 @@
package org.elasticsearch.common.network;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.client.transport.TransportClientNodesService;
+import org.elasticsearch.client.transport.support.TransportProxyClient;
+import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.ExtensionPoint;
+import org.elasticsearch.http.HttpServer;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.http.netty.NettyHttpServerTransport;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
+import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
+import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
+import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
+import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
+import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
+import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
+import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
+import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
+import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
+import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
+import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
+import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
+import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
+import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
+import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
+import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
+import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
+import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
+import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
+import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
+import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
+import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
+import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
+import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
+import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
+import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
+import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
+import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction;
+import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
+import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
+import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
+import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
+import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
+import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
+import org.elasticsearch.rest.action.bulk.RestBulkAction;
+import org.elasticsearch.rest.action.cat.AbstractCatAction;
+import org.elasticsearch.rest.action.cat.RestAliasAction;
+import org.elasticsearch.rest.action.cat.RestAllocationAction;
+import org.elasticsearch.rest.action.cat.RestCatAction;
+import org.elasticsearch.rest.action.cat.RestFielddataAction;
+import org.elasticsearch.rest.action.cat.RestHealthAction;
+import org.elasticsearch.rest.action.cat.RestIndicesAction;
+import org.elasticsearch.rest.action.cat.RestMasterAction;
+import org.elasticsearch.rest.action.cat.RestNodeAttrsAction;
+import org.elasticsearch.rest.action.cat.RestNodesAction;
+import org.elasticsearch.rest.action.cat.RestPluginsAction;
+import org.elasticsearch.rest.action.cat.RestRepositoriesAction;
+import org.elasticsearch.rest.action.cat.RestSegmentsAction;
+import org.elasticsearch.rest.action.cat.RestShardsAction;
+import org.elasticsearch.rest.action.cat.RestSnapshotAction;
+import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
+import org.elasticsearch.rest.action.delete.RestDeleteAction;
+import org.elasticsearch.rest.action.explain.RestExplainAction;
+import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction;
+import org.elasticsearch.rest.action.get.RestGetAction;
+import org.elasticsearch.rest.action.get.RestGetSourceAction;
+import org.elasticsearch.rest.action.get.RestHeadAction;
+import org.elasticsearch.rest.action.get.RestMultiGetAction;
+import org.elasticsearch.rest.action.index.RestIndexAction;
+import org.elasticsearch.rest.action.main.RestMainAction;
+import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
+import org.elasticsearch.rest.action.percolate.RestPercolateAction;
+import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction;
+import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction;
+import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction;
+import org.elasticsearch.rest.action.search.RestClearScrollAction;
+import org.elasticsearch.rest.action.search.RestMultiSearchAction;
+import org.elasticsearch.rest.action.search.RestSearchAction;
+import org.elasticsearch.rest.action.search.RestSearchScrollAction;
+import org.elasticsearch.rest.action.suggest.RestSuggestAction;
+import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction;
+import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction;
+import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction;
+import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
+import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
+import org.elasticsearch.rest.action.update.RestUpdateAction;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.local.LocalTransport;
+import org.elasticsearch.transport.netty.NettyTransport;
+
+import java.util.Arrays;
+import java.util.List;
/**
- *
+ * A module to handle registering and binding all network related classes.
*/
public class NetworkModule extends AbstractModule {
+ public static final String TRANSPORT_TYPE_KEY = "transport.type";
+ public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
+
+ public static final String LOCAL_TRANSPORT = "local";
+ public static final String NETTY_TRANSPORT = "netty";
+
+ public static final String HTTP_TYPE_KEY = "http.type";
+ public static final String HTTP_ENABLED = "http.enabled";
+
+ private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
+ RestMainAction.class,
+
+ RestNodesInfoAction.class,
+ RestNodesStatsAction.class,
+ RestNodesHotThreadsAction.class,
+ RestClusterStatsAction.class,
+ RestClusterStateAction.class,
+ RestClusterHealthAction.class,
+ RestClusterUpdateSettingsAction.class,
+ RestClusterGetSettingsAction.class,
+ RestClusterRerouteAction.class,
+ RestClusterSearchShardsAction.class,
+ RestPendingClusterTasksAction.class,
+ RestPutRepositoryAction.class,
+ RestGetRepositoriesAction.class,
+ RestDeleteRepositoryAction.class,
+ RestVerifyRepositoryAction.class,
+ RestGetSnapshotsAction.class,
+ RestCreateSnapshotAction.class,
+ RestRestoreSnapshotAction.class,
+ RestDeleteSnapshotAction.class,
+ RestSnapshotsStatusAction.class,
+
+ RestIndicesExistsAction.class,
+ RestTypesExistsAction.class,
+ RestGetIndicesAction.class,
+ RestIndicesStatsAction.class,
+ RestIndicesSegmentsAction.class,
+ RestIndicesShardStoresAction.class,
+ RestGetAliasesAction.class,
+ RestAliasesExistAction.class,
+ RestIndexDeleteAliasesAction.class,
+ RestIndexPutAliasAction.class,
+ RestIndicesAliasesAction.class,
+ RestGetIndicesAliasesAction.class,
+ RestCreateIndexAction.class,
+ RestDeleteIndexAction.class,
+ RestCloseIndexAction.class,
+ RestOpenIndexAction.class,
+
+ RestUpdateSettingsAction.class,
+ RestGetSettingsAction.class,
+
+ RestAnalyzeAction.class,
+ RestGetIndexTemplateAction.class,
+ RestPutIndexTemplateAction.class,
+ RestDeleteIndexTemplateAction.class,
+ RestHeadIndexTemplateAction.class,
+
+ RestPutWarmerAction.class,
+ RestDeleteWarmerAction.class,
+ RestGetWarmerAction.class,
+
+ RestPutMappingAction.class,
+ RestGetMappingAction.class,
+ RestGetFieldMappingAction.class,
+
+ RestRefreshAction.class,
+ RestFlushAction.class,
+ RestSyncedFlushAction.class,
+ RestForceMergeAction.class,
+ RestUpgradeAction.class,
+ RestClearIndicesCacheAction.class,
+
+ RestIndexAction.class,
+ RestGetAction.class,
+ RestGetSourceAction.class,
+ RestHeadAction.class,
+ RestMultiGetAction.class,
+ RestDeleteAction.class,
+ org.elasticsearch.rest.action.count.RestCountAction.class,
+ RestSuggestAction.class,
+ RestTermVectorsAction.class,
+ RestMultiTermVectorsAction.class,
+ RestBulkAction.class,
+ RestUpdateAction.class,
+ RestPercolateAction.class,
+ RestMultiPercolateAction.class,
+
+ RestSearchAction.class,
+ RestSearchScrollAction.class,
+ RestClearScrollAction.class,
+ RestMultiSearchAction.class,
+ RestRenderSearchTemplateAction.class,
+
+ RestValidateQueryAction.class,
+
+ RestExplainAction.class,
+
+ RestRecoveryAction.class,
+
+ // Templates API
+ RestGetSearchTemplateAction.class,
+ RestPutSearchTemplateAction.class,
+ RestDeleteSearchTemplateAction.class,
+
+ // Scripts API
+ RestGetIndexedScriptAction.class,
+ RestPutIndexedScriptAction.class,
+ RestDeleteIndexedScriptAction.class,
+
+ RestFieldStatsAction.class,
+
+ // no abstract cat action
+ RestCatAction.class
+ );
+
+ private static final List<Class<? extends AbstractCatAction>> builtinCatHandlers = Arrays.asList(
+ RestAllocationAction.class,
+ RestShardsAction.class,
+ RestMasterAction.class,
+ RestNodesAction.class,
+ RestIndicesAction.class,
+ RestSegmentsAction.class,
+ // Fully qualified to prevent interference with rest.action.count.RestCountAction
+ org.elasticsearch.rest.action.cat.RestCountAction.class,
+ // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction
+ org.elasticsearch.rest.action.cat.RestRecoveryAction.class,
+ RestHealthAction.class,
+ org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class,
+ RestAliasAction.class,
+ RestThreadPoolAction.class,
+ RestPluginsAction.class,
+ RestFielddataAction.class,
+ RestNodeAttrsAction.class,
+ RestRepositoriesAction.class,
+ RestSnapshotAction.class
+ );
+
private final NetworkService networkService;
+ private final Settings settings;
+ private final boolean transportClient;
- public NetworkModule(NetworkService networkService) {
+ private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
+ private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
+ private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
+ private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class);
+ // we must separate the cat rest handlers so RestCatAction can collect them...
+ private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class);
+
+ /**
+ * Creates a network module that custom networking classes can be plugged into.
+ *
+ * @param networkService A constructed network service object to bind.
+ * @param settings The settings for the node
+ * @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
+ */
+ public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
this.networkService = networkService;
+ this.settings = settings;
+ this.transportClient = transportClient;
+ registerTransportService(NETTY_TRANSPORT, TransportService.class);
+ registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
+ registerTransport(NETTY_TRANSPORT, NettyTransport.class);
+
+ if (transportClient == false) {
+ registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class);
+
+ for (Class<? extends AbstractCatAction> catAction : builtinCatHandlers) {
+ catHandlers.registerExtension(catAction);
+ }
+ for (Class<? extends RestHandler> restAction : builtinRestHandlers) {
+ restHandlers.registerExtension(restAction);
+ }
+ }
+ }
+
+ /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
+ public void registerTransportService(String name, Class<? extends TransportService> clazz) {
+ transportServiceTypes.registerExtension(name, clazz);
+ }
+
+ /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
+ public void registerTransport(String name, Class<? extends Transport> clazz) {
+ transportTypes.registerExtension(name, clazz);
+ }
+
+ /** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */
+ // TODO: we need another name than "http transport"....so confusing with transportClient...
+ public void registerHttpTransport(String name, Class<? extends HttpServerTransport> clazz) {
+ if (transportClient) {
+ throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client");
+ }
+ httpTransportTypes.registerExtension(name, clazz);
+ }
+
+ /** Adds an additional rest action. */
+ // TODO: change this further to eliminate the middle man, ie RestController, and just register method and path here
+ public void registerRestHandler(Class<? extends RestHandler> clazz) {
+ if (transportClient) {
+ throw new IllegalArgumentException("Cannot register rest handler " + clazz.getName() + " for transport client");
+ }
+ if (AbstractCatAction.class.isAssignableFrom(clazz)) {
+ catHandlers.registerExtension(clazz.asSubclass(AbstractCatAction.class));
+ } else {
+ restHandlers.registerExtension(clazz);
+ }
}
@Override
protected void configure() {
bind(NetworkService.class).toInstance(networkService);
+ bind(NamedWriteableRegistry.class).asEagerSingleton();
+
+ transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
+ String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
+ transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport);
+
+ if (transportClient) {
+ bind(Headers.class).asEagerSingleton();
+ bind(TransportProxyClient.class).asEagerSingleton();
+ bind(TransportClientNodesService.class).asEagerSingleton();
+ } else {
+ if (settings.getAsBoolean(HTTP_ENABLED, true)) {
+ bind(HttpServer.class).asEagerSingleton();
+ httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
+ }
+ bind(RestController.class).asEagerSingleton();
+ catHandlers.bind(binder());
+ restHandlers.bind(binder());
+ }
}
}
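
For plugin authors the interesting part is the trio of register* methods. A hedged sketch of how a plugin of this era would extend the module from its reflectively discovered onModule hook (MySslTransport and MyRestHandler are placeholders, not part of this patch):

    public void onModule(NetworkModule module) {
        // selected at startup via the setting transport.type: my-ssl
        module.registerTransport("my-ssl", MySslTransport.class);
        // throws IllegalArgumentException when the node is a transport client
        module.registerRestHandler(MyRestHandler.class);
    }
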
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
new file mode 100644
index 0000000000..13743cabcf
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.component.AbstractComponent;
+
+import java.util.*;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+/**
+ * A basic setting service that can be used for per-index and per-cluster settings.
+ * This service offers transactional application of settings updates.
+ */
+public abstract class AbstractScopedSettings extends AbstractComponent {
+ private Settings lastSettingsApplied = Settings.EMPTY;
+ private final List<SettingUpdater> settingUpdaters = new ArrayList<>();
+ private final Map<String, Setting<?>> complexMatchers = new HashMap<>();
+ private final Map<String, Setting<?>> keySettings = new HashMap<>();
+ private final Setting.Scope scope;
+
+ protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
+ super(settings);
+ for (Setting<?> entry : settingsSet) {
+ if (entry.getScope() != scope) {
+ throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope());
+ }
+ if (entry.hasComplexMatcher()) {
+ complexMatchers.put(entry.getKey(), entry);
+ } else {
+ keySettings.put(entry.getKey(), entry);
+ }
+ }
+ this.scope = scope;
+ }
+
+ public Setting.Scope getScope() {
+ return this.scope;
+ }
+
+ /**
+ * Validates the given settings against all registered updaters without applying them. This
+ * method will not change any settings but will fail if any of the settings can't be applied.
+ */
+ public synchronized Settings dryRun(Settings settings) {
+ final Settings current = Settings.builder().put(this.settings).put(settings).build();
+ final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
+ List<RuntimeException> exceptions = new ArrayList<>();
+ for (SettingUpdater settingUpdater : settingUpdaters) {
+ try {
+ if (settingUpdater.hasChanged(current, previous)) {
+ settingUpdater.getValue(current, previous);
+ }
+ } catch (RuntimeException ex) {
+ exceptions.add(ex);
+ logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater);
+ }
+ }
+ // here we are exhaustive and record all settings that failed.
+ ExceptionsHelper.rethrowAndSuppress(exceptions);
+ return current;
+ }
+
+ /**
+ * Applies the given settings to all the settings consumers or to none of them. The settings
+ * are merged with the node settings before they are applied; the given settings take
+ * precedence over existing node settings.
+ * @param newSettings the settings to apply
+ * @return the unmerged applied settings
+ */
+ public synchronized Settings applySettings(Settings newSettings) {
+ if (lastSettingsApplied != null && newSettings.equals(lastSettingsApplied)) {
+ // nothing changed in the settings, ignore
+ return newSettings;
+ }
+ final Settings current = Settings.builder().put(this.settings).put(newSettings).build();
+ final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
+ try {
+ List<Runnable> applyRunnables = new ArrayList<>();
+ for (SettingUpdater settingUpdater : settingUpdaters) {
+ try {
+ applyRunnables.add(settingUpdater.updater(current, previous));
+ } catch (Exception ex) {
+ logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater);
+ throw ex;
+ }
+ }
+ for (Runnable settingUpdater : applyRunnables) {
+ settingUpdater.run();
+ }
+ } catch (Exception ex) {
+ logger.warn("failed to apply settings", ex);
+ throw ex;
+ }
+ return lastSettingsApplied = newSettings;
+ }
+
+ /**
+ * Adds a settings consumer together with a validator that is only evaluated at update time.
+ * <p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
+ * </p>
+ * @param validator an additional validator that is only applied to updates of this setting.
+ * This is useful to add additional validation to settings at runtime compared to at startup time.
+ */
+ public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) {
+ if (setting != get(setting.getKey())) {
+ throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]");
+ }
+ this.settingUpdaters.add(setting.newUpdater(consumer, logger, validator));
+ }
+
+ /**
+ * Adds a settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change.
+ * <p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
+ * </p>
+ * This method registers a compound updater that is useful if two settings depend on each other. The consumer is always provided
+ * with both values even if only one of the two changes.
+ */
+ public synchronized <A, B> void addSettingsUpdateConsumer(Setting<A> a, Setting<B> b, BiConsumer<A, B> consumer) {
+ if (a != get(a.getKey())) {
+ throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]");
+ }
+ if (b != get(b.getKey())) {
+ throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]");
+ }
+ this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger));
+ }
+
+ /**
+ * Adds a settings consumer.
+ * <p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
+ * </p>
+ */
+ public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
+ addSettingsUpdateConsumer(setting, consumer, (s) -> {});
+ }
+
+ /**
+ * Transactional interface to update settings.
+ * @see Setting
+ */
+ public interface SettingUpdater<T> {
+
+ /**
+ * Returns true if this updater's setting has changed with the current update
+ * @param current the current settings
+ * @param previous the previous settings
+ * @return true if this updater's setting has changed with the current update
+ */
+ boolean hasChanged(Settings current, Settings previous);
+
+ /**
+ * Returns the instance value for the current settings. This method is stateless and idempotent.
+ * This method will throw an exception if the source of this value is invalid.
+ */
+ T getValue(Settings current, Settings previous);
+
+ /**
+ * Applies the given value to the updater. This methods will actually run the update.
+ */
+ void apply(T value, Settings current, Settings previous);
+
+ /**
+ * Updates this updater's value if it has changed.
+ * @return <code>true</code> iff the value has been updated.
+ */
+ default boolean apply(Settings current, Settings previous) {
+ if (hasChanged(current, previous)) {
+ T value = getValue(current, previous);
+ apply(value, current, previous);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns a runnable that calls {@link #apply(Object, Settings, Settings)} if the settings
+ * actually changed. This allows deferring the update to a later point in time while keeping type safety.
+ * If the value didn't change, the returned runnable is a noop.
+ */
+ default Runnable updater(Settings current, Settings previous) {
+ if (hasChanged(current, previous)) {
+ T value = getValue(current, previous);
+ return () -> apply(value, current, previous);
+ }
+ return () -> {};
+ }
+ }
+
+ /**
+ * Returns the {@link Setting} for the given key or <code>null</code> if the setting cannot be found.
+ */
+ public Setting<?> get(String key) {
+ Setting<?> setting = keySettings.get(key);
+ if (setting == null) {
+ for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
+ if (entry.getValue().match(key)) {
+ return entry.getValue();
+ }
+ }
+ } else {
+ return setting;
+ }
+ return null;
+ }
+
+ /**
+ * Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
+ */
+ public boolean hasDynamicSetting(String key) {
+ final Setting<?> setting = get(key);
+ return setting != null && setting.isDynamic();
+ }
+
+ /**
+ * Returns a settings object that contains all settings that are not
+ * already set in the given source. For each such setting the diff contains either its
+ * default value or, if present, its value from the given default settings.
+ */
+ public Settings diff(Settings source, Settings defaultSettings) {
+ Settings.Builder builder = Settings.builder();
+ for (Setting<?> setting : keySettings.values()) {
+ if (setting.exists(source) == false) {
+ builder.put(setting.getKey(), setting.getRaw(defaultSettings));
+ }
+ }
+ return builder.build();
+ }
+
+}
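
The intended consumption pattern is that a component registers a consumer for a dynamic setting and keeps its own volatile copy. A hedged usage sketch (the setting key is illustrative, and the Setting instance must be part of the set handed to the service's constructor, or addSettingsUpdateConsumer will reject it):

    Setting<TimeValue> PING_TIMEOUT = Setting.positiveTimeSetting(
            "my.component.ping_timeout", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER);

    // in the component's constructor:
    this.pingTimeout = PING_TIMEOUT.get(settings);
    clusterSettings.addSettingsUpdateConsumer(PING_TIMEOUT, t -> this.pingTimeout = t);

    // later: every updater is validated first, then all runnables are applied together
    clusterSettings.applySettings(Settings.builder().put("my.component.ping_timeout", "10s").build());
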
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
new file mode 100644
index 0000000000..ac9631d29b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
+import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.cluster.InternalClusterInfoService;
+import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.*;
+import org.elasticsearch.cluster.service.InternalClusterService;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.discovery.DiscoverySettings;
+import org.elasticsearch.discovery.zen.ZenDiscovery;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.index.store.IndexStoreConfig;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.ttl.IndicesTTLService;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.*;
+
+/**
+ * Encapsulates all valid cluster level settings.
+ */
+public final class ClusterSettings extends AbstractScopedSettings {
+
+ public ClusterSettings(Settings settings, Set<Setting<?>> settingsSet) {
+ super(settings, settingsSet, Setting.Scope.CLUSTER);
+ }
+
+
+ @Override
+ public synchronized Settings applySettings(Settings newSettings) {
+ Settings settings = super.applySettings(newSettings);
+ try {
+ for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+ if (entry.getKey().startsWith("logger.")) {
+ String component = entry.getKey().substring("logger.".length());
+ if ("_root".equals(component)) {
+ ESLoggerFactory.getRootLogger().setLevel(entry.getValue());
+ } else {
+ ESLoggerFactory.getLogger(component).setLevel(entry.getValue());
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to refresh settings for [{}]", e, "logger");
+ }
+ return settings;
+ }
+
+ /**
+ * Returns <code>true</code> if the setting is a logger setting.
+ */
+ public boolean isLoggerSetting(String key) {
+ return key.startsWith("logger.");
+ }
+
+
+ public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
+ AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
+ BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
+ BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
+ BalancedShardsAllocator.THRESHOLD_SETTING,
+ ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
+ ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
+ EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
+ EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
+ ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
+ FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
+ FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
+ FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
+ IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
+ IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
+ IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
+ MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
+ MetaData.SETTING_READ_ONLY_SETTING,
+ RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING,
+ RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING,
+ RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
+ RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
+ RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
+ RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
+ RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
+ ThreadPool.THREADPOOL_GROUP_SETTING,
+ ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
+ ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
+ DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
+ InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
+ InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
+ SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
+ DestructiveOperations.REQUIRES_NAME_SETTING,
+ DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
+ DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
+ DiscoverySettings.COMMIT_TIMEOUT_SETTING,
+ DiscoverySettings.NO_MASTER_BLOCK_SETTING,
+ HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
+ HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
+ HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
+ InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
+ SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
+ ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
+ TransportService.TRACE_LOG_EXCLUDE_SETTING,
+ TransportService.TRACE_LOG_INCLUDE_SETTING,
+ TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
+ ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
+ InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
+ HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
+ HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
+ Transport.TRANSPORT_PROFILES_SETTING,
+ Transport.TRANSPORT_TCP_COMPRESS)));
+}
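
The applySettings override means logger levels bypass the updater machinery entirely and are pushed straight to ESLoggerFactory. A hedged sketch using only the APIs shown above:

    // "_root" addresses the root logger, any other suffix a named component logger
    clusterSettings.applySettings(Settings.builder()
            .put("logger._root", "INFO")
            .put("logger.indices.recovery", "TRACE")
            .build());
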
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
new file mode 100644
index 0000000000..236df5c567
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -0,0 +1,461 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.action.support.ToXContentToBytes;
+import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.MemorySizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.regex.Pattern;
+
+/**
+ * A strongly typed setting: couples a settings key with a default value, a parser,
+ * a scope, and a flag controlling whether the value may be updated dynamically.
+ */
+public class Setting<T> extends ToXContentToBytes {
+ private final String key;
+ protected final Function<Settings, String> defaultValue;
+ private final Function<String, T> parser;
+ private final boolean dynamic;
+ private final Scope scope;
+
+ /**
+ * Creates a new Setting instance
+ * @param key the settings key for this setting.
+ * @param defaultValue a default value function that returns the default values string representation.
+ * @param parser a parser that parses the string rep into a complex datatype.
+ * @param dynamic true iff this setting can be dynamically updated
+ * @param scope the scope of this setting
+ */
+ public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
+ assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null";
+ this.key = key;
+ this.defaultValue = defaultValue;
+ this.parser = parser;
+ this.dynamic = dynamic;
+ this.scope = scope;
+ }
+
+ /**
+ * Returns the settings key or a prefix if this setting is a group setting
+ * @see #isGroupSetting()
+ */
+ public final String getKey() {
+ return key;
+ }
+
+ /**
+ * Returns <code>true</code> iff this setting is dynamically updateable, otherwise <code>false</code>
+ */
+ public final boolean isDynamic() {
+ return dynamic;
+ }
+
+ /**
+ * Returns the settings scope
+ */
+ public final Scope getScope() {
+ return scope;
+ }
+
+ /**
+ * Returns <code>true</code> iff this setting is a group setting. Group settings represent a set of settings
+ * rather than a single value. The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like <tt>cluster.store.</tt>
+ * that matches all settings with this prefix.
+ */
+ boolean isGroupSetting() {
+ return false;
+ }
+
+ boolean hasComplexMatcher() {
+ return isGroupSetting();
+ }
+
+ /**
+ * Returns the default value's string representation for this setting.
+ * @param settings a settings object, used when the default value depends on another setting
+ */
+ public final String getDefault(Settings settings) {
+ return defaultValue.apply(settings);
+ }
+
+ /**
+ * Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
+ */
+ public final boolean exists(Settings settings) {
+ return settings.get(key) != null;
+ }
+
+ /**
+ * Returns the settings value. If the setting is not present in the given settings object the default value is returned
+ * instead.
+ */
+ public T get(Settings settings) {
+ String value = getRaw(settings);
+ try {
+ return parser.apply(value);
+ } catch (ElasticsearchParseException ex) {
+ throw new IllegalArgumentException(ex.getMessage(), ex);
+ } catch (NumberFormatException ex) {
+ throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", ex);
+ } catch (IllegalArgumentException ex) {
+ throw ex;
+ } catch (Exception t) {
+ throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t);
+ }
+ }
+
+ /**
+ * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned
+ * instead. This is useful for accessing the raw value when it can't be parsed because it is invalid.
+ */
+ public String getRaw(Settings settings) {
+ return settings.get(key, defaultValue.apply(settings));
+ }
+
+ /**
+ * Returns <code>true</code> iff the given key matches the setting's key or, if this setting is a
+ * group setting, iff the given key is part of the settings group.
+ * @see #isGroupSetting()
+ */
+ public boolean match(String toTest) {
+ return key.equals(toTest);
+ }
+
+ @Override
+ public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field("key", key);
+ builder.field("type", scope.name());
+ builder.field("dynamic", dynamic);
+ builder.field("is_group_setting", isGroupSetting());
+ builder.field("default", defaultValue.apply(Settings.EMPTY));
+ builder.endObject();
+ return builder;
+ }
+
+ /**
+ * The settings scope - settings can either be cluster settings or per index settings.
+ */
+ public enum Scope {
+ CLUSTER,
+ INDEX;
+ }
+
+ final AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger) {
+ return newUpdater(consumer, logger, (s) -> {});
+ }
+
+ AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
+ if (isDynamic()) {
+ return new Updater(consumer, logger, validator);
+ } else {
+ throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
+ }
+ }
+
+ /**
+ * This is used for settings that depend on each other; see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and its
+ * usage for details.
+ */
+ static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A, B> consumer, final Setting<A> aSetting, final Setting<B> bSetting, ESLogger logger) {
+ final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSetting.newUpdater(null, logger);
+ final AbstractScopedSettings.SettingUpdater<B> bSettingUpdater = bSetting.newUpdater(null, logger);
+ return new AbstractScopedSettings.SettingUpdater<Tuple<A, B>>() {
+ @Override
+ public boolean hasChanged(Settings current, Settings previous) {
+ return aSettingUpdater.hasChanged(current, previous) || bSettingUpdater.hasChanged(current, previous);
+ }
+
+ @Override
+ public Tuple<A, B> getValue(Settings current, Settings previous) {
+ return new Tuple<>(aSettingUpdater.getValue(current, previous), bSettingUpdater.getValue(current, previous));
+ }
+
+ @Override
+ public void apply(Tuple<A, B> value, Settings current, Settings previous) {
+ consumer.accept(value.v1(), value.v2());
+ }
+
+ @Override
+ public String toString() {
+ return "CompoundUpdater for: " + aSettingUpdater + " and " + bSettingUpdater;
+ }
+ };
+ }
+
+
+ private class Updater implements AbstractScopedSettings.SettingUpdater<T> {
+ private final Consumer<T> consumer;
+ private final ESLogger logger;
+ private final Consumer<T> accept;
+
+ public Updater(Consumer<T> consumer, ESLogger logger, Consumer<T> accept) {
+ this.consumer = consumer;
+ this.logger = logger;
+ this.accept = accept;
+ }
+
+ @Override
+ public String toString() {
+ return "Updater for: " + Setting.this.toString();
+ }
+
+ @Override
+ public boolean hasChanged(Settings current, Settings previous) {
+ final String newValue = getRaw(current);
+ final String value = getRaw(previous);
+ assert isGroupSetting() == false : "group settings must override this method";
+ assert value != null : "value was null but can't be unless default is null which is invalid";
+
+ return value.equals(newValue) == false;
+ }
+
+ @Override
+ public T getValue(Settings current, Settings previous) {
+ final String newValue = getRaw(current);
+ final String value = getRaw(previous);
+ T inst = get(current);
+ try {
+ accept.accept(inst);
+ } catch (Exception | AssertionError e) {
+ throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + newValue + "]", e);
+ }
+ return inst;
+ }
+
+ @Override
+ public void apply(T value, Settings current, Settings previous) {
+ logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
+ consumer.accept(value);
+ }
+ }
+
+
+ public Setting(String key, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
+ this(key, (s) -> defaultValue, parser, dynamic, scope);
+ }
+
+ public static Setting<Float> floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope);
+ }
+
+ public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> {
+ float value = Float.parseFloat(s);
+ if (value < minValue) {
+ throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+ }
+ return value;
+ }, dynamic, scope);
+ }
+
+ public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
+ }
+
+ public static int parseInt(String s, int minValue, String key) {
+ int value = Integer.parseInt(s);
+ if (value < minValue) {
+ throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+ }
+ return value;
+ }
+
+ public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
+ return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
+ }
+
+ public static Setting<Boolean> boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
+ }
+
+ public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
+ }
+
+ public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope);
+ }
+
+ public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
+ return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope);
+ }
+
+ public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
+ return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
+ }
+ public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
+ Function<String, List<T>> parser = (s) -> {
+ try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){
+ XContentParser.Token token = xContentParser.nextToken();
+ if (token != XContentParser.Token.START_ARRAY) {
+ throw new IllegalArgumentException("expected START_ARRAY but got " + token);
+ }
+ ArrayList<T> list = new ArrayList<>();
+ while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token != XContentParser.Token.VALUE_STRING) {
+ throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
+ }
+ list.add(singleValueParser.apply(xContentParser.text()));
+ }
+ return list;
+ } catch (IOException e) {
+ throw new IllegalArgumentException("failed to parse array", e);
+ }
+ };
+ return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
+ private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
+ @Override
+ public String getRaw(Settings settings) {
+ String[] array = settings.getAsArray(key, null);
+ return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
+ }
+
+ public boolean match(String toTest) {
+ return pattern.matcher(toTest).matches();
+ }
+
+ @Override
+ boolean hasComplexMatcher() {
+ return true;
+ }
+ };
+ }
+
+ private static String arrayToParsableString(String[] array) {
+ try {
+ XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
+ builder.startArray();
+ for (String element : array) {
+ builder.value(element);
+ }
+ builder.endArray();
+ return builder.string();
+ } catch (IOException ex) {
+ throw new ElasticsearchException(ex);
+ }
+ }
+
+ public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
+ if (key.endsWith(".") == false) {
+ throw new IllegalArgumentException("key must end with a '.'");
+ }
+ return new Setting<Settings>(key, "", (s) -> null, dynamic, scope) {
+
+ @Override
+ public boolean isGroupSetting() {
+ return true;
+ }
+
+ @Override
+ public Settings get(Settings settings) {
+ return settings.getByPrefix(key);
+ }
+
+ @Override
+ public boolean match(String toTest) {
+ return Regex.simpleMatch(key + "*", toTest);
+ }
+
+ @Override
+ public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, ESLogger logger, Consumer<Settings> validator) {
+ if (isDynamic() == false) {
+ throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
+ }
+ final Setting<?> setting = this;
+ return new AbstractScopedSettings.SettingUpdater<Settings>() {
+
+ @Override
+ public boolean hasChanged(Settings current, Settings previous) {
+ Settings currentSettings = get(current);
+ Settings previousSettings = get(previous);
+ return currentSettings.equals(previousSettings) == false;
+ }
+
+ @Override
+ public Settings getValue(Settings current, Settings previous) {
+ Settings currentSettings = get(current);
+ Settings previousSettings = get(previous);
+ try {
+ validator.accept(currentSettings);
+ } catch (Exception | AssertionError e) {
+ throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + previousSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e);
+ }
+ return currentSettings;
+ }
+
+ @Override
+ public void apply(Settings value, Settings current, Settings previous) {
+ consumer.accept(value);
+ }
+
+ @Override
+ public String toString() {
+ return "Updater for: " + setting.toString();
+ }
+ };
+ }
+ };
+ }
+
+ public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, defaultValue, (s) -> {
+ TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
+ if (timeValue.millis() < minValue.millis()) {
+ throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+ }
+ return timeValue;
+ }, dynamic, scope);
+ }
+
+ public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
+ return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope);
+ }
+
+ public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope);
+ }
+
+ public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) {
+ return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
+ final double d = Double.parseDouble(s);
+ if (d < minValue) {
+ throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
+ }
+ return d;
+ }, dynamic, scope);
+ }
+
+}
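
A minimal usage sketch for the factory methods above: declare a typed Setting constant, then read it with get(Settings). The signatures and Setting.Scope.CLUSTER are exactly those in this hunk, and Settings.builder() appears elsewhere in this diff; the "example.*" keys are hypothetical, chosen only for illustration.

    import java.util.Arrays;
    import java.util.List;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;

    public class SettingFactoriesExample {
        // dynamic cluster-scoped int setting with a lower bound of 1
        static final Setting<Integer> RETRIES =
                Setting.intSetting("example.retries", 3, 1, true, Setting.Scope.CLUSTER);
        // non-dynamic boolean setting
        static final Setting<Boolean> ENABLED =
                Setting.boolSetting("example.enabled", true, false, Setting.Scope.CLUSTER);
        // time setting that rejects negative values
        static final Setting<TimeValue> TIMEOUT =
                Setting.positiveTimeSetting("example.timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
        // list setting parsed element-wise with the supplied function
        static final Setting<List<Integer>> PORTS =
                Setting.listSetting("example.ports", Arrays.asList("9300", "9301"), Integer::parseInt, true, Setting.Scope.CLUSTER);

        public static void main(String[] args) {
            Settings settings = Settings.builder().put("example.retries", "5").build();
            System.out.println(RETRIES.get(settings));  // 5, parsed and bounds-checked
            System.out.println(ENABLED.get(settings));  // true, from the default
            System.out.println(TIMEOUT.get(settings));  // 30s, from the default
            System.out.println(PORTS.get(settings));    // [9300, 9301], from the default
        }
    }
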
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
index 5e083a9e74..989b05d4bf 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -597,6 +597,8 @@ public final class Settings implements ToXContent {
return result.toArray(new String[result.size()]);
}
+
+
/**
* Returns group settings for the given setting prefix.
*/
@@ -614,6 +616,9 @@ public final class Settings implements ToXContent {
if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') {
settingPrefix = settingPrefix + ".";
}
+ return getGroupsInternal(settingPrefix, ignoreNonGrouped);
+ }
+ private Map<String, Settings> getGroupsInternal(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException {
// we don't really care that it might happen twice
Map<String, Map<String, String>> map = new LinkedHashMap<>();
for (Object o : settings.keySet()) {
@@ -643,6 +648,16 @@ public final class Settings implements ToXContent {
}
return Collections.unmodifiableMap(retVal);
}
+ /**
+ * Returns all settings grouped by their top-level key.
+ */
+ public Map<String, Settings> getAsGroups() throws SettingsException {
+ return getAsGroups(false);
+ }
+
+ public Map<String, Settings> getAsGroups(boolean ignoreNonGrouped) throws SettingsException {
+ return getGroupsInternal("", ignoreNonGrouped);
+ }
/**
* Returns a parsed version.
@@ -706,7 +721,7 @@ public final class Settings implements ToXContent {
Builder builder = new Builder();
int numberOfSettings = in.readVInt();
for (int i = 0; i < numberOfSettings; i++) {
- builder.put(in.readString(), in.readString());
+ builder.put(in.readString(), in.readOptionalString());
}
return builder.build();
}
@@ -715,7 +730,7 @@ public final class Settings implements ToXContent {
out.writeVInt(settings.getAsMap().size());
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
out.writeString(entry.getKey());
- out.writeString(entry.getValue());
+ out.writeOptionalString(entry.getValue());
}
}
@@ -818,6 +833,10 @@ public final class Settings implements ToXContent {
return this;
}
+ public Builder putNull(String key) {
+ return put(key, (String) null);
+ }
+
/**
* Sets a setting with the provided setting key and class as value.
*
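
A short sketch of the Builder#putNull and getAsGroups additions from this hunk. An explicit null is now representable (and serializable, thanks to the readOptionalString/writeOptionalString change above); getAsGroups buckets every setting under its top-level key. The keys below are illustrative only.

    import java.util.Map;
    import org.elasticsearch.common.settings.Settings;

    public class SettingsGroupsExample {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .put("discovery.zen.publish_timeout", "10s")
                    .putNull("discovery.zen.commit_timeout")   // explicit null is now kept
                    .build();

            // group all settings by their top-level key: "discovery" -> subtree
            Map<String, Settings> groups = settings.getAsGroups();
            Settings discovery = groups.get("discovery");
            System.out.println(discovery.get("zen.publish_timeout")); // "10s"
        }
    }
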
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
index 2ae4799d9f..8bc8ce1b65 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java
@@ -21,6 +21,10 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.common.inject.AbstractModule;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+
/**
* A module that binds the provided settings to the {@link Settings} interface.
*
@@ -30,15 +34,36 @@ public class SettingsModule extends AbstractModule {
private final Settings settings;
private final SettingsFilter settingsFilter;
+ private final Map<String, Setting<?>> clusterDynamicSettings = new HashMap<>();
+
public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
this.settings = settings;
this.settingsFilter = settingsFilter;
+ for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
+ registerSetting(setting);
+ }
}
@Override
protected void configure() {
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
+ final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values()));
+ bind(ClusterSettings.class).toInstance(clusterSettings);
}
-}
\ No newline at end of file
+
+ public void registerSetting(Setting<?> setting) {
+ switch (setting.getScope()) {
+ case CLUSTER:
+ if (clusterDynamicSettings.containsKey(setting.getKey())) {
+ throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
+ }
+ clusterDynamicSettings.put(setting.getKey(), setting);
+ break;
+ case INDEX:
+ throw new UnsupportedOperationException("not yet implemented");
+ }
+ }
+
+}
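
A hedged sketch of registering a component-owned setting through the new registerSetting hook, e.g. from a plugin's onModule method; the plugin class and setting key are assumptions for illustration, while the duplicate-registration failure is the behaviour shown above.

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.SettingsModule;

    public class ExamplePlugin {
        // a hypothetical dynamic cluster-scoped setting owned by this plugin
        public static final Setting<Integer> MAX_WIDGETS =
                Setting.intSetting("example.max_widgets", 100, 0, true, Setting.Scope.CLUSTER);

        public void onModule(SettingsModule settingsModule) {
            // makes the setting known to ClusterSettings; registering the same
            // key twice throws IllegalArgumentException, as shown above
            settingsModule.registerSetting(MAX_WIDGETS);
        }
    }
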
diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
index 725c7e5694..9c2f973b96 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java
@@ -103,9 +103,9 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
} else if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NULL) {
- // ignore this
+ serializeValue(settings, sb, path, parser, currentFieldName, true);
} else {
- serializeValue(settings, sb, path, parser, currentFieldName);
+ serializeValue(settings, sb, path, parser, currentFieldName, false);
}
}
@@ -126,31 +126,33 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
} else if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NULL) {
+ serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), true);
- // ignore
} else {
- serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++));
+ serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), false);
}
}
}
- private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName) throws IOException {
+ private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName, boolean isNull) throws IOException {
sb.setLength(0);
for (String pathEle : path) {
sb.append(pathEle).append('.');
}
sb.append(fieldName);
String key = sb.toString();
- String currentValue = parser.text();
- String previousValue = settings.put(key, currentValue);
- if (previousValue != null) {
+ String currentValue = isNull ? null : parser.text();
+
+ if (settings.containsKey(key)) {
throw new ElasticsearchParseException(
"duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]",
key,
parser.getTokenLocation().lineNumber,
parser.getTokenLocation().columnNumber,
- previousValue,
+ settings.get(key),
currentValue
);
}
+ settings.put(key, currentValue);
}
}
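
A sketch of the new null handling, assuming the JSON subclass JsonSettingsLoader from this package exposes the SettingsLoader#load(String) method: before this change an explicit null was silently dropped, now the key survives with a null value (which Settings can carry thanks to the optional-string serialization earlier in this diff), and duplicate keys are still rejected.

    import java.util.Map;
    import org.elasticsearch.common.settings.loader.JsonSettingsLoader;

    public class NullValueLoaderExample {
        public static void main(String[] args) throws Exception {
            Map<String, String> loaded = new JsonSettingsLoader()
                    .load("{\"index\": {\"refresh_interval\": null, \"number_of_shards\": 5}}");
            System.out.println(loaded.containsKey("index.refresh_interval")); // true
            System.out.println(loaded.get("index.refresh_interval"));         // null
            System.out.println(loaded.get("index.number_of_shards"));         // "5"
        }
    }
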
diff --git a/core/src/main/java/org/elasticsearch/common/text/BytesText.java b/core/src/main/java/org/elasticsearch/common/text/BytesText.java
deleted file mode 100644
index d78055db2b..0000000000
--- a/core/src/main/java/org/elasticsearch/common/text/BytesText.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.text;
-
-import java.nio.charset.StandardCharsets;
-import org.elasticsearch.common.bytes.BytesReference;
-
-/**
- * A {@link BytesReference} representation of the text, will always convert on the fly to a {@link String}.
- */
-public class BytesText implements Text {
-
- private BytesReference bytes;
- private int hash;
-
- public BytesText(BytesReference bytes) {
- this.bytes = bytes;
- }
-
- @Override
- public boolean hasBytes() {
- return true;
- }
-
- @Override
- public BytesReference bytes() {
- return bytes;
- }
-
- @Override
- public boolean hasString() {
- return false;
- }
-
- @Override
- public String string() {
- // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
- if (!bytes.hasArray()) {
- bytes = bytes.toBytesArray();
- }
- return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
- }
-
- @Override
- public String toString() {
- return string();
- }
-
- @Override
- public int hashCode() {
- if (hash == 0) {
- hash = bytes.hashCode();
- }
- return hash;
- }
-
- @Override
- public boolean equals(Object obj) {
- return bytes().equals(((Text) obj).bytes());
- }
-
- @Override
- public int compareTo(Text text) {
- return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
- }
-} \ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java b/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java
deleted file mode 100644
index 36bf76ce44..0000000000
--- a/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.text;
-
-import java.nio.charset.StandardCharsets;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
-
-/**
- * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
- * the other is requests, caches the other one in a local reference so no additional conversion will be needed.
- */
-public class StringAndBytesText implements Text {
-
- public static final Text[] EMPTY_ARRAY = new Text[0];
-
- public static Text[] convertFromStringArray(String[] strings) {
- if (strings.length == 0) {
- return EMPTY_ARRAY;
- }
- Text[] texts = new Text[strings.length];
- for (int i = 0; i < strings.length; i++) {
- texts[i] = new StringAndBytesText(strings[i]);
- }
- return texts;
- }
-
- private BytesReference bytes;
- private String text;
- private int hash;
-
- public StringAndBytesText(BytesReference bytes) {
- this.bytes = bytes;
- }
-
- public StringAndBytesText(String text) {
- this.text = text;
- }
-
- @Override
- public boolean hasBytes() {
- return bytes != null;
- }
-
- @Override
- public BytesReference bytes() {
- if (bytes == null) {
- bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
- }
- return bytes;
- }
-
- @Override
- public boolean hasString() {
- return text != null;
- }
-
- @Override
- public String string() {
- // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
- if (text == null) {
- if (!bytes.hasArray()) {
- bytes = bytes.toBytesArray();
- }
- text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
- }
- return text;
- }
-
- @Override
- public String toString() {
- return string();
- }
-
- @Override
- public int hashCode() {
- if (hash == 0) {
- hash = bytes().hashCode();
- }
- return hash;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (obj == null) {
- return false;
- }
- return bytes().equals(((Text) obj).bytes());
- }
-
- @Override
- public int compareTo(Text text) {
- return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/common/text/StringText.java b/core/src/main/java/org/elasticsearch/common/text/StringText.java
deleted file mode 100644
index 9d12096b2c..0000000000
--- a/core/src/main/java/org/elasticsearch/common/text/StringText.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.text;
-
-import java.nio.charset.StandardCharsets;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
-
-/**
- * A {@link String} only representation of the text. Will always convert to bytes on the fly.
- */
-public class StringText implements Text {
-
- public static final Text[] EMPTY_ARRAY = new Text[0];
-
- public static Text[] convertFromStringArray(String[] strings) {
- if (strings.length == 0) {
- return EMPTY_ARRAY;
- }
- Text[] texts = new Text[strings.length];
- for (int i = 0; i < strings.length; i++) {
- texts[i] = new StringText(strings[i]);
- }
- return texts;
- }
-
- private final String text;
- private int hash;
-
- public StringText(String text) {
- this.text = text;
- }
-
- @Override
- public boolean hasBytes() {
- return false;
- }
-
- @Override
- public BytesReference bytes() {
- return new BytesArray(text.getBytes(StandardCharsets.UTF_8));
- }
-
- @Override
- public boolean hasString() {
- return true;
- }
-
- @Override
- public String string() {
- return text;
- }
-
- @Override
- public String toString() {
- return string();
- }
-
- @Override
- public int hashCode() {
- // we use bytes here so we can be consistent with other text implementations
- if (hash == 0) {
- hash = bytes().hashCode();
- }
- return hash;
- }
-
- @Override
- public boolean equals(Object obj) {
- // we use bytes here so we can be consistent with other text implementations
- return bytes().equals(((Text) obj).bytes());
- }
-
- @Override
- public int compareTo(Text text) {
- return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/common/text/Text.java b/core/src/main/java/org/elasticsearch/common/text/Text.java
index 9fe1ea5f35..d5b02f559f 100644
--- a/core/src/main/java/org/elasticsearch/common/text/Text.java
+++ b/core/src/main/java/org/elasticsearch/common/text/Text.java
@@ -18,39 +18,101 @@
*/
package org.elasticsearch.common.text;
+import java.nio.charset.StandardCharsets;
+import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
-
/**
- * Text represents a (usually) long text data. We use this abstraction instead of {@link String}
- * so we can represent it in a more optimized manner in memory as well as serializing it over the
- * network as well as converting it to json format.
+ * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
+ * the other is requested, caches it in a local reference so no additional conversion is needed.
*/
-public interface Text extends Comparable<Text> {
+public final class Text implements Comparable<Text> {
- /**
- * Are bytes available without the need to be converted into bytes when calling {@link #bytes()}.
- */
- boolean hasBytes();
+ public static final Text[] EMPTY_ARRAY = new Text[0];
+
+ public static Text[] convertFromStringArray(String[] strings) {
+ if (strings.length == 0) {
+ return EMPTY_ARRAY;
+ }
+ Text[] texts = new Text[strings.length];
+ for (int i = 0; i < strings.length; i++) {
+ texts[i] = new Text(strings[i]);
+ }
+ return texts;
+ }
+
+ private BytesReference bytes;
+ private String text;
+ private int hash;
+
+ public Text(BytesReference bytes) {
+ this.bytes = bytes;
+ }
+
+ public Text(String text) {
+ this.text = text;
+ }
/**
- * The UTF8 bytes representing the the text, might be converted on the fly, see {@link #hasBytes()}
+ * Whether a {@link BytesReference} view of the data is already materialized.
*/
- BytesReference bytes();
+ public boolean hasBytes() {
+ return bytes != null;
+ }
/**
- * Is there a {@link String} representation of the text. If not, then it {@link #hasBytes()}.
+ * Returns a {@link BytesReference} view of the data.
*/
- boolean hasString();
+ public BytesReference bytes() {
+ if (bytes == null) {
+ bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
+ }
+ return bytes;
+ }
/**
- * Returns the string representation of the text, might be converted to a string on the fly.
+ * Whether a {@link String} view of the data is already materialized.
*/
- String string();
+ public boolean hasString() {
+ return text != null;
+ }
/**
- * Returns the string representation of the text, might be converted to a string on the fly.
+ * Returns a {@link String} view of the data.
*/
+ public String string() {
+ if (text == null) {
+ if (!bytes.hasArray()) {
+ bytes = bytes.toBytesArray();
+ }
+ text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
+ }
+ return text;
+ }
+
+ @Override
+ public String toString() {
+ return string();
+ }
+
+ @Override
+ public int hashCode() {
+ if (hash == 0) {
+ hash = bytes().hashCode();
+ }
+ return hash;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == null) {
+ return false;
+ }
+ return bytes().equals(((Text) obj).bytes());
+ }
+
@Override
- String toString();
+ public int compareTo(Text text) {
+ return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
+ }
}
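
A small sketch of the consolidated Text class that replaces BytesText, StringText and StringAndBytesText: one concrete type that starts from either representation and lazily materializes and caches the other. The constructors and accessors are exactly those shown above; BytesArray(String) is assumed to build a UTF-8 BytesReference.

    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.common.text.Text;

    public class TextExample {
        public static void main(String[] args) {
            Text fromString = new Text("hello");
            System.out.println(fromString.hasBytes()); // false: bytes not materialized yet
            fromString.bytes();                        // converts once and caches
            System.out.println(fromString.hasBytes()); // true

            Text fromBytes = new Text(new BytesArray("hello"));
            System.out.println(fromBytes.hasString()); // false: string not materialized yet
            System.out.println(fromBytes.string());    // "hello", converted once and cached

            System.out.println(fromString.equals(fromBytes)); // true: compared via UTF-8 bytes
        }
    }
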
diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
index ee6371605e..fb44c7dc9a 100644
--- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
+++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
@@ -229,6 +229,30 @@ public class TimeValue implements Streamable {
return Strings.format1Decimals(value, suffix);
}
+ public String getStringRep() {
+ if (duration < 0) {
+ return Long.toString(duration);
+ }
+ switch (timeUnit) {
+ case NANOSECONDS:
+ return Strings.format1Decimals(duration, "nanos");
+ case MICROSECONDS:
+ return Strings.format1Decimals(duration, "micros");
+ case MILLISECONDS:
+ return Strings.format1Decimals(duration, "ms");
+ case SECONDS:
+ return Strings.format1Decimals(duration, "s");
+ case MINUTES:
+ return Strings.format1Decimals(duration, "m");
+ case HOURS:
+ return Strings.format1Decimals(duration, "h");
+ case DAYS:
+ return Strings.format1Decimals(duration, "d");
+ default:
+ throw new IllegalArgumentException("unknown time unit: " + timeUnit.name());
+ }
+ }
+
public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) {
settingName = Objects.requireNonNull(settingName);
assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName;
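
A sketch of why getStringRep exists: toString() is human-friendly but lossy about units (90 seconds prints as "1.5m"), while getStringRep keeps the original unit so a value can round-trip through parseTimeValue, e.g. when a setting default is rendered back to a string as in the timeSetting factories earlier in this diff. The setting name below is hypothetical.

    import org.elasticsearch.common.unit.TimeValue;

    public class TimeValueRepExample {
        public static void main(String[] args) {
            TimeValue ninety = TimeValue.timeValueSeconds(90);
            System.out.println(ninety.toString());      // "1.5m", unit changed for readability
            System.out.println(ninety.getStringRep());  // "90s", original unit preserved
            TimeValue parsed = TimeValue.parseTimeValue(ninety.getStringRep(), null, "example.timeout");
            System.out.println(parsed.equals(ninety));  // true: lossless round trip
        }
    }
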
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
index af8e753469..d26485a121 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
@@ -53,7 +53,7 @@ import java.util.Map;
*/
public final class XContentBuilder implements BytesStream, Releasable {
- public static enum FieldCaseConversion {
+ public enum FieldCaseConversion {
/**
* No conversion will occur.
*/
@@ -251,14 +251,7 @@ public final class XContentBuilder implements BytesStream, Releasable {
}
public XContentBuilder field(XContentBuilderString name) throws IOException {
- if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) {
- generator.writeFieldName(name.underscore());
- } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
- generator.writeFieldName(name.camelCase());
- } else {
- generator.writeFieldName(name.underscore());
- }
- return this;
+ return field(name, fieldCaseConversion);
}
public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException {
@@ -273,22 +266,13 @@ public final class XContentBuilder implements BytesStream, Releasable {
}
public XContentBuilder field(String name) throws IOException {
- if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) {
- if (cachedStringBuilder == null) {
- cachedStringBuilder = new StringBuilder();
- }
- name = Strings.toUnderscoreCase(name, cachedStringBuilder);
- } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) {
- if (cachedStringBuilder == null) {
- cachedStringBuilder = new StringBuilder();
- }
- name = Strings.toCamelCase(name, cachedStringBuilder);
- }
- generator.writeFieldName(name);
- return this;
+ return field(name, fieldCaseConversion);
}
public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException {
+ if (name == null) {
+ throw new IllegalArgumentException("field name cannot be null");
+ }
if (conversion == FieldCaseConversion.UNDERSCORE) {
if (cachedStringBuilder == null) {
cachedStringBuilder = new StringBuilder();
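
A short sketch of the new fail-fast null check in field(String, FieldCaseConversion), which both single-argument field overloads now delegate to; XContentFactory.jsonBuilder() is assumed to be the usual entry point for building a JSON XContentBuilder.

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;

    public class NullFieldNameExample {
        public static void main(String[] args) throws Exception {
            XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
            try {
                builder.field((String) null); // now fails fast instead of failing deeper in the generator
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // "field name cannot be null"
            }
            builder.field("ok", "value").endObject();
            System.out.println(builder.string()); // {"ok":"value"}
        }
    }
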
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java
index eeba9baa32..a82099658e 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java
@@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout";
public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed";
-
private static class InitialStateListener implements InitialStateDiscoveryListener {
private final CountDownLatch latch = new CountDownLatch(1);
@@ -132,10 +132,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
}
public static String generateNodeId(Settings settings) {
- String seed = settings.get(DiscoveryService.SETTING_DISCOVERY_SEED);
- if (seed != null) {
- return Strings.randomBase64UUID(new Random(Long.parseLong(seed)));
- }
- return Strings.randomBase64UUID();
+ Random random = Randomness.get(settings, DiscoveryService.SETTING_DISCOVERY_SEED);
+ return Strings.randomBase64UUID(random);
}
}
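
A sketch of the seeded node-id behaviour, assuming Randomness.get(settings, key) returns a Random seeded from the setting whenever it is present, which is what the replacement above implies; with no seed configured the ids differ on every call.

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.discovery.DiscoveryService;

    public class SeededNodeIdExample {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .put(DiscoveryService.SETTING_DISCOVERY_SEED, "42")
                    .build();
            // same seed, same id: useful for reproducing test scenarios
            String first = DiscoveryService.generateNodeId(settings);
            String second = DiscoveryService.generateNodeId(settings);
            System.out.println(first.equals(second)); // true
        }
    }
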
diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java
index 20f2c96b12..6689d9c868 100644
--- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java
+++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java
@@ -23,9 +23,10 @@ import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.rest.RestStatus;
import java.util.EnumSet;
@@ -35,42 +36,40 @@ import java.util.EnumSet;
*/
public class DiscoverySettings extends AbstractComponent {
+ public final static int NO_MASTER_BLOCK_ID = 2;
+ public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
+ public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
/**
* sets the timeout for a complete publishing cycle, including both sending and committing. the master
* will continute to process the next cluster state update after this time has elapsed
**/
- public static final String PUBLISH_TIMEOUT = "discovery.zen.publish_timeout";
+ public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
/**
* sets the timeout for receiving enough acks for a specific cluster state and committing it. failing
* to receive responses within this window will cause the cluster state change to be rejected.
*/
- public static final String COMMIT_TIMEOUT = "discovery.zen.commit_timeout";
- public static final String NO_MASTER_BLOCK = "discovery.zen.no_master_block";
- public static final String PUBLISH_DIFF_ENABLE = "discovery.zen.publish_diff.enable";
-
- public static final TimeValue DEFAULT_PUBLISH_TIMEOUT = TimeValue.timeValueSeconds(30);
- public static final TimeValue DEFAULT_COMMIT_TIMEOUT = TimeValue.timeValueSeconds(30);
- public static final String DEFAULT_NO_MASTER_BLOCK = "write";
- public final static int NO_MASTER_BLOCK_ID = 2;
- public final static boolean DEFAULT_PUBLISH_DIFF_ENABLE = true;
-
- public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
- public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
+ public static final Setting<TimeValue> COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER);
+ public static final Setting<ClusterBlock> NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER);
+ public static final Setting<Boolean> PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER);
private volatile ClusterBlock noMasterBlock;
private volatile TimeValue publishTimeout;
+
private volatile TimeValue commitTimeout;
private volatile boolean publishDiff;
@Inject
- public DiscoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
+ public DiscoverySettings(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- nodeSettingsService.addListener(new ApplySettings());
- this.noMasterBlock = parseNoMasterBlock(settings.get(NO_MASTER_BLOCK, DEFAULT_NO_MASTER_BLOCK));
- this.publishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, DEFAULT_PUBLISH_TIMEOUT);
- this.commitTimeout = settings.getAsTime(COMMIT_TIMEOUT, new TimeValue(Math.min(DEFAULT_COMMIT_TIMEOUT.millis(), publishTimeout.millis())));
- this.publishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, DEFAULT_PUBLISH_DIFF_ENABLE);
+ clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock);
+ clusterSettings.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff);
+ clusterSettings.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout);
+ clusterSettings.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout);
+ this.noMasterBlock = NO_MASTER_BLOCK_SETTING.get(settings);
+ this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings);
+ this.commitTimeout = COMMIT_TIMEOUT_SETTING.get(settings);
+ this.publishDiff = PUBLISH_DIFF_ENABLE_SETTING.get(settings);
}
/**
@@ -88,47 +87,25 @@ public class DiscoverySettings extends AbstractComponent {
return noMasterBlock;
}
- public boolean getPublishDiff() { return publishDiff;}
+ private void setNoMasterBlock(ClusterBlock noMasterBlock) {
+ this.noMasterBlock = noMasterBlock;
+ }
- private class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- TimeValue newPublishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, null);
- if (newPublishTimeout != null) {
- if (newPublishTimeout.millis() != publishTimeout.millis()) {
- logger.info("updating [{}] from [{}] to [{}]", PUBLISH_TIMEOUT, publishTimeout, newPublishTimeout);
- publishTimeout = newPublishTimeout;
- if (settings.getAsTime(COMMIT_TIMEOUT, null) == null && commitTimeout.millis() > publishTimeout.millis()) {
- logger.info("reducing default [{}] to [{}] due to publish timeout change", COMMIT_TIMEOUT, publishTimeout);
- commitTimeout = publishTimeout;
- }
- }
- }
- TimeValue newCommitTimeout = settings.getAsTime(COMMIT_TIMEOUT, null);
- if (newCommitTimeout != null) {
- if (newCommitTimeout.millis() != commitTimeout.millis()) {
- logger.info("updating [{}] from [{}] to [{}]", COMMIT_TIMEOUT, commitTimeout, newCommitTimeout);
- commitTimeout = newCommitTimeout;
- }
- }
- String newNoMasterBlockValue = settings.get(NO_MASTER_BLOCK);
- if (newNoMasterBlockValue != null) {
- ClusterBlock newNoMasterBlock = parseNoMasterBlock(newNoMasterBlockValue);
- if (newNoMasterBlock != noMasterBlock) {
- noMasterBlock = newNoMasterBlock;
- }
- }
- Boolean newPublishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, null);
- if (newPublishDiff != null) {
- if (newPublishDiff != publishDiff) {
- logger.info("updating [{}] from [{}] to [{}]", PUBLISH_DIFF_ENABLE, publishDiff, newPublishDiff);
- publishDiff = newPublishDiff;
- }
- }
- }
+ private void setPublishDiff(boolean publishDiff) {
+ this.publishDiff = publishDiff;
+ }
+
+ private void setPublishTimeout(TimeValue publishTimeout) {
+ this.publishTimeout = publishTimeout;
}
- private ClusterBlock parseNoMasterBlock(String value) {
+ private void setCommitTimeout(TimeValue commitTimeout) {
+ this.commitTimeout = commitTimeout;
+ }
+
+ public boolean getPublishDiff() { return publishDiff; }
+
+ private static ClusterBlock parseNoMasterBlock(String value) {
switch (value) {
case "all":
return NO_MASTER_BLOCK_ALL;
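
The rewritten DiscoverySettings constructor above shows the new pattern in full; a condensed, hypothetical component makes the shape easier to see: declare the Setting constant, read the initial parsed value, and register a consumer that receives already-validated values, instead of diffing raw Settings in a NodeSettingsService listener.

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.TimeValue;

    public class ExampleComponent {
        // hypothetical dynamic setting, declared once as a constant
        public static final Setting<TimeValue> EXAMPLE_TIMEOUT_SETTING =
                Setting.positiveTimeSetting("example.timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

        private volatile TimeValue timeout;

        public ExampleComponent(Settings settings, ClusterSettings clusterSettings) {
            this.timeout = EXAMPLE_TIMEOUT_SETTING.get(settings); // initial value, parsed and validated
            // called with the already-parsed value on every accepted update
            clusterSettings.addSettingsUpdateConsumer(EXAMPLE_TIMEOUT_SETTING, this::setTimeout);
        }

        private void setTimeout(TimeValue timeout) {
            this.timeout = timeout;
        }
    }
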
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
index 03111d141e..8849a849f9 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java
@@ -39,6 +39,8 @@ import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@@ -55,7 +57,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
import org.elasticsearch.node.service.NodeService;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
@@ -74,7 +75,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
- public final static String SETTING_REJOIN_ON_MASTER_GONE = "discovery.zen.rejoin_on_master_gone";
+ public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout";
public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout";
public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts";
@@ -139,7 +140,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
@Inject
public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool,
- TransportService transportService, final ClusterService clusterService, NodeSettingsService nodeSettingsService,
+ TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings,
ZenPingService pingService, ElectMasterService electMasterService,
DiscoverySettings discoverySettings) {
super(settings);
@@ -160,7 +161,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true);
this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false);
this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2));
- this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true);
+ this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);
if (this.joinRetryAttempts < 1) {
throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]");
@@ -171,7 +172,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
- nodeSettingsService.addListener(new ApplySettings());
+ clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
+ final ClusterState clusterState = clusterService.state();
+ int masterNodes = clusterState.nodes().masterNodes().size();
+ if (value > masterNodes) {
+ throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
+ }
+ });
+ clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoinOnMasterGone);
this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService);
this.masterFD.addListener(new MasterNodeFailureListener());
@@ -306,6 +314,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterJoinsCounter.get() > 0;
}
+ private void setRejoinOnMasterGone(boolean rejoin) {
+ this.rejoinOnMasterGone = rejoin;
+ }
+
/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@@ -824,8 +836,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}
- void handleJoinRequest(final DiscoveryNode node, final MembershipAction.JoinCallback callback) {
-
+ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) {
if (!transportService.addressSupported(node.address().getClass())) {
// TODO, what should we do now? Maybe inform that node that its crap?
logger.warn("received a wrong address type from [{}], ignoring...", node);
@@ -837,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// Sanity check: maybe we don't end up here, because serialization may have failed.
if (node.getVersion().before(minimumNodeJoinVersion)) {
callback.onFailure(
- new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
+ new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
);
return;
}
@@ -847,7 +858,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// validate the join request, will throw a failure if it fails, which will get back to the
// node calling the join request
- membership.sendValidateJoinRequestBlocking(node, joinTimeout);
+ try {
+ membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
+ } catch (Throwable e) {
+ logger.warn("failed to validate incoming join request from node [{}]", node);
+ callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
+ return;
+ }
nodeJoinController.handleJoinRequest(node, callback);
}
}
@@ -1027,7 +1044,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private class MembershipListener implements MembershipAction.MembershipListener {
@Override
public void onJoin(DiscoveryNode node, MembershipAction.JoinCallback callback) {
- handleJoinRequest(node, callback);
+ handleJoinRequest(node, clusterService.state(), callback);
}
@Override
@@ -1139,26 +1156,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- int minimumMasterNodes = settings.getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES,
- ZenDiscovery.this.electMaster.minimumMasterNodes());
- if (minimumMasterNodes != ZenDiscovery.this.electMaster.minimumMasterNodes()) {
- logger.info("updating {} from [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES,
- ZenDiscovery.this.electMaster.minimumMasterNodes(), minimumMasterNodes);
- handleMinimumMasterNodesChanged(minimumMasterNodes);
- }
-
- boolean rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone);
- if (rejoinOnMasterGone != ZenDiscovery.this.rejoinOnMasterGone) {
- logger.info("updating {} from [{}] to [{}]", SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone, rejoinOnMasterGone);
- ZenDiscovery.this.rejoinOnMasterGone = rejoinOnMasterGone;
- }
- }
- }
-
-
/**
* All control of the join thread should happen under the cluster state update task thread.
* This is important to make sure that the background joining process is always in sync with any cluster state updates
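
A condensed sketch of the three-argument addSettingsUpdateConsumer overload used in the constructor above: the validator runs before a value is accepted, so an update of minimum_master_nodes that exceeds the current master-eligible node count is rejected with IllegalArgumentException instead of being applied. The wrapper class and method names below are hypothetical.

    import org.elasticsearch.cluster.ClusterService;
    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.discovery.zen.elect.ElectMasterService;

    public class MinimumMasterNodesWiring {
        MinimumMasterNodesWiring(ClusterService clusterService, ClusterSettings clusterSettings) {
            clusterSettings.addSettingsUpdateConsumer(
                ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
                this::onMinimumMasterNodes,   // consumer: applied once the update is accepted
                value -> {                    // validator: runs first, may throw to reject
                    int masterNodes = clusterService.state().nodes().masterNodes().size();
                    if (value > masterNodes) {
                        throw new IllegalArgumentException(
                            "cannot set minimum_master_nodes to more than the current master nodes count [" + masterNodes + "]");
                    }
                });
        }

        private void onMinimumMasterNodes(int minimumMasterNodes) {
            // react to the accepted value
        }
    }
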
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java
index 9164a85388..9cca1edfc5 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java
@@ -22,11 +22,10 @@ package org.elasticsearch.discovery.zen.elect;
import com.carrotsearch.hppc.ObjectContainer;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
@@ -41,23 +40,7 @@ import java.util.List;
*/
public class ElectMasterService extends AbstractComponent {
- public static final String DISCOVERY_ZEN_MINIMUM_MASTER_NODES = "discovery.zen.minimum_master_nodes";
- public static final Validator DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR = new Validator() {
- @Override
- public String validate(String setting, String value, ClusterState clusterState) {
- int intValue;
- try {
- intValue = Integer.parseInt(value);
- } catch (NumberFormatException ex) {
- return "cannot parse value [" + value + "] as an integer";
- }
- int masterNodes = clusterState.nodes().masterNodes().size();
- if (intValue > masterNodes) {
- return "cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES + " to more than the current master nodes count [" + masterNodes + "]";
- }
- return null;
- }
- };
+ public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER);
// This is the minimum version a master needs to be on, otherwise it gets ignored
// This is based on the minimum compatible version of the current version this node is on
@@ -70,7 +53,7 @@ public class ElectMasterService extends AbstractComponent {
public ElectMasterService(Settings settings, Version version) {
super(settings);
this.minMasterVersion = version.minimumCompatibilityVersion();
- this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1);
+ this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
}
diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
index 4260b992dd..5a96addc84 100644
--- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
+++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java
@@ -20,6 +20,7 @@
package org.elasticsearch.discovery.zen.membership;
import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -88,10 +89,6 @@ public class MembershipAction extends AbstractComponent {
transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS);
}
- public void sendJoinRequest(DiscoveryNode masterNode, DiscoveryNode node) {
- transportService.sendRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME);
- }
-
public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) {
transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME)
.txGet(timeout.millis(), TimeUnit.MILLISECONDS);
@@ -100,8 +97,8 @@ public class MembershipAction extends AbstractComponent {
/**
* Validates the join request, throwing a failure if it failed.
*/
- public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) {
- transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(), EmptyTransportResponseHandler.INSTANCE_SAME)
+ public void sendValidateJoinRequestBlocking(DiscoveryNode node, ClusterState state, TimeValue timeout) {
+ transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(state), EmptyTransportResponseHandler.INSTANCE_SAME)
.txGet(timeout.millis(), TimeUnit.MILLISECONDS);
}
@@ -156,9 +153,26 @@ public class MembershipAction extends AbstractComponent {
}
}
- public static class ValidateJoinRequest extends TransportRequest {
+ class ValidateJoinRequest extends TransportRequest {
+ private ClusterState state;
+
+ ValidateJoinRequest() {
+ }
+
+ ValidateJoinRequest(ClusterState state) {
+ this.state = state;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ this.state = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode());
+ }
- public ValidateJoinRequest() {
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ this.state.writeTo(out);
}
}
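
A hedged sketch of what the state-carrying ValidateJoinRequest buys: the joining node must deserialize the master's current cluster state before it is admitted, surfacing serialization incompatibilities early. The BytesStreamOutput/StreamInput.wrap round trip below stands in for the transport layer and is an assumption; the readFrom call is the one shown above.

    import java.io.IOException;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    public class JoinValidationSketch {
        // mirrors what ValidateJoinRequest now does across the wire
        static void simulateJoinValidation(ClusterState state, DiscoveryNode joiningNode) throws IOException {
            BytesStreamOutput out = new BytesStreamOutput();
            state.writeTo(out);                                 // master side: serialize current state
            StreamInput in = StreamInput.wrap(out.bytes());
            ClusterState.Builder.readFrom(in, joiningNode);     // joining side: throws if incompatible
        }
    }
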
diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
index 3a1b430f98..93e95dfaa9 100644
--- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
+++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -21,7 +21,12 @@ package org.elasticsearch.env;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.store.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.NativeFSLockFactory;
+import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -31,6 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
@@ -38,11 +44,25 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.fs.FsProbe;
+import org.elasticsearch.monitor.jvm.JvmInfo;
import java.io.Closeable;
import java.io.IOException;
-import java.nio.file.*;
-import java.util.*;
+import java.nio.file.AtomicMoveNotSupportedException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileStore;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -145,7 +165,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
Files.createDirectories(dir);
-
+
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
try {
@@ -187,6 +207,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
}
maybeLogPathDetails();
+ maybeLogHeapDetails();
if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) {
SegmentInfos.setInfoStream(System.out);
@@ -274,6 +295,13 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
}
}
+ private void maybeLogHeapDetails() {
+ JvmInfo jvmInfo = JvmInfo.jvmInfo();
+ ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax();
+ String useCompressedOops = jvmInfo.useCompressedOops();
+ logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
+ }
+
private static String toString(Collection<String> items) {
StringBuilder b = new StringBuilder();
for(String item : items) {
@@ -811,7 +839,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
// Sanity check:
assert Integer.parseInt(shardPath.getName(count-1).toString()) >= 0;
assert "indices".equals(shardPath.getName(count-3).toString());
-
+
return shardPath.getParent().getParent().getParent();
}
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
index e83ec695a9..5e410fb6d5 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java
@@ -227,7 +227,7 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
// automatically generate a UID for the metadata if we need to
metaDataBuilder.generateClusterUuidIfNeeded();
- if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) {
+ if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings()) || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
index e560b4458b..79bfbdac8c 100644
--- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
@@ -20,6 +20,7 @@
package org.elasticsearch.gateway;
import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -30,8 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
import java.util.*;
+import java.util.stream.Collectors;
/**
* The primary shard allocator allocates primary shard that were not created as
@@ -39,6 +42,7 @@ import java.util.*;
*/
public abstract class PrimaryShardAllocator extends AbstractComponent {
+ @Deprecated
public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards";
private final String initialShards;
@@ -56,13 +60,21 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
- ShardRouting shard = unassignedIterator.next();
+ final ShardRouting shard = unassignedIterator.next();
- if (needToFindPrimaryCopy(shard) == false) {
+ if (shard.primary() == false) {
continue;
}
- AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
+ final IndexMetaData indexMetaData = metaData.index(shard.getIndex());
+ final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList());
+
+ if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
+ // when we create a fresh index
+ continue;
+ }
+
+ final AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
if (shardState.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
allocation.setHasPendingAsyncFetch();
@@ -70,25 +82,50 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
continue;
}
- IndexMetaData indexMetaData = metaData.index(shard.getIndex());
- Settings indexSettings = Settings.builder().put(settings).put(indexMetaData.getSettings()).build();
-
- NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexSettings), allocation.getIgnoreNodes(shard.shardId()), shardState);
- logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
+ final Set<String> lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id());
+ final boolean snapshotRestore = shard.restoreSource() != null;
+ final boolean recoverOnAnyNode = recoverOnAnyNode(indexSettings);
+
+ final NodesAndVersions nodesAndVersions;
+ final boolean enoughAllocationsFound;
+
+ if (lastActiveAllocationIds.isEmpty()) {
+ assert indexSettings.getIndexVersionCreated().before(Version.V_3_0_0) : "trying to allocate a primary with an empty allocation id set, but index is new";
+ // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
+ // fall back to old version-based allocation mode
+ // Note that once the shard has been active, lastActiveAllocationIds will be non-empty
+ nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
+ if (snapshotRestore || recoverOnAnyNode) {
+ enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
+ } else {
+ enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions);
+ }
+ logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
+ } else {
+ assert lastActiveAllocationIds.isEmpty() == false;
+ // use allocation ids to select nodes
+ nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode,
+ allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
+ enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
+ logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds);
+ }
- if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) {
- // if we are restoring this shard we still can allocate
- if (shard.restoreSource() == null) {
+ if (enoughAllocationsFound == false) {
+ if (snapshotRestore) {
+ // let BalancedShardsAllocator take care of allocating this shard
+ logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
+ } else if (recoverOnAnyNode) {
+ // let BalancedShardsAllocator take care of allocating this shard
+ logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
+ } else {
// we can't really allocate, so ignore it and continue
unassignedIterator.removeAndIgnore();
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound);
- } else {
- logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
}
continue;
}
- NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions);
+ final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes);
if (nodesToAllocate.yesNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
@@ -109,63 +146,99 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}
/**
- * Does the shard need to find a primary copy?
+ * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching
+ * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
+ * entries with matching allocation id are always at the front of the list.
*/
- boolean needToFindPrimaryCopy(ShardRouting shard) {
- if (shard.primary() == false) {
- return false;
- }
+ protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
+ Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
+ List<DiscoveryNode> matchingNodes = new ArrayList<>();
+ List<DiscoveryNode> nonMatchingNodes = new ArrayList<>();
+ long highestVersion = -1;
+ for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
+ DiscoveryNode node = nodeShardState.getNode();
+ String allocationId = nodeShardState.allocationId();
+
+ if (ignoreNodes.contains(node.id())) {
+ continue;
+ }
+
+ if (nodeShardState.storeException() == null) {
+ if (allocationId == null && nodeShardState.version() != -1) {
+ // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard
+ allocationId = "_n/a_";
+ }
+
+ logger.trace("[{}] on node [{}] has allocation id [{}]", shard, nodeShardState.getNode(), allocationId);
+ } else {
+ logger.trace("[{}] on node [{}] has allocation id [{}] but the store cannot be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId);
+ allocationId = null;
+ }
- // this is an API allocation, ignore since we know there is no data...
- if (shard.allocatedPostIndexCreate() == false) {
- return false;
+ if (allocationId != null) {
+ if (lastActiveAllocationIds.contains(allocationId)) {
+ matchingNodes.add(node);
+ highestVersion = Math.max(highestVersion, nodeShardState.version());
+ } else if (matchAnyShard) {
+ nonMatchingNodes.add(node);
+ highestVersion = Math.max(highestVersion, nodeShardState.version());
+ }
+ }
}
- return true;
+ List<DiscoveryNode> nodes = new ArrayList<>();
+ nodes.addAll(matchingNodes);
+ nodes.addAll(nonMatchingNodes);
+
+ if (logger.isTraceEnabled()) {
+ logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", ")));
+ }
+ return new NodesAndVersions(nodes, nodes.size(), highestVersion);
}
- private boolean isEnoughAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
+ /**
+ * Used by the old version-based allocation strategy.
+ */
+ private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
// check if the counts meets the minimum set
int requiredAllocation = 1;
// if we restore from a repository, one copy is more than enough
- if (shard.restoreSource() == null) {
- try {
- String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
- if ("quorum".equals(initialShards)) {
- if (indexMetaData.getNumberOfReplicas() > 1) {
- requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;
- }
- } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
- if (indexMetaData.getNumberOfReplicas() > 2) {
- requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2);
- }
- } else if ("one".equals(initialShards)) {
- requiredAllocation = 1;
- } else if ("full".equals(initialShards) || "all".equals(initialShards)) {
- requiredAllocation = indexMetaData.getNumberOfReplicas() + 1;
- } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
- if (indexMetaData.getNumberOfReplicas() > 1) {
- requiredAllocation = indexMetaData.getNumberOfReplicas();
- }
- } else {
- requiredAllocation = Integer.parseInt(initialShards);
+ try {
+ String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
+ if ("quorum".equals(initialShards)) {
+ if (indexMetaData.getNumberOfReplicas() > 1) {
+ requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;
}
- } catch (Exception e) {
- logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard);
+ } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
+ if (indexMetaData.getNumberOfReplicas() > 2) {
+ requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2);
+ }
+ } else if ("one".equals(initialShards)) {
+ requiredAllocation = 1;
+ } else if ("full".equals(initialShards) || "all".equals(initialShards)) {
+ requiredAllocation = indexMetaData.getNumberOfReplicas() + 1;
+ } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
+ if (indexMetaData.getNumberOfReplicas() > 1) {
+ requiredAllocation = indexMetaData.getNumberOfReplicas();
+ }
+ } else {
+ requiredAllocation = Integer.parseInt(initialShards);
}
+ } catch (Exception e) {
+ logger.warn("[{}][{}] failed to derive initial_shards from value {}, ignoring allocation for {}", shard.index(), shard.id(), initialShards, shard);
}
return nodesAndVersions.allocationsFound >= requiredAllocation;
}
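
A worked example of the quorum arithmetic above: with number_of_replicas set to 2, an index has three copies (one primary plus two replicas), so "quorum" requires (3 / 2) + 1 = 2 of them to be found. Note that the numberOfReplicas > 1 guard keeps a one-replica index at a required count of 1. A runnable sketch:

```java
public class InitialShardsQuorum {
    public static void main(String[] args) {
        int numberOfReplicas = 2;      // 1 primary + 2 replicas = 3 copies
        int requiredAllocation = 1;
        if (numberOfReplicas > 1) {
            // quorum over all copies: (copies / 2) + 1 = (3 / 2) + 1 = 2
            requiredAllocation = ((1 + numberOfReplicas) / 2) + 1;
        }
        System.out.println(requiredAllocation); // prints 2
    }
}
```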
/**
- * Based on the nodes and versions, build the list of yes/no/throttle nodes that the shard applies to.
+ * Splits the list of nodes into yes/no/throttle lists based on the allocation deciders
*/
- private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, NodesAndVersions nodesAndVersions) {
+ private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List<DiscoveryNode> nodes) {
List<DiscoveryNode> yesNodes = new ArrayList<>();
List<DiscoveryNode> throttledNodes = new ArrayList<>();
List<DiscoveryNode> noNodes = new ArrayList<>();
- for (DiscoveryNode discoNode : nodesAndVersions.nodes) {
+ for (DiscoveryNode discoNode : nodes) {
RoutingNode node = allocation.routingNodes().node(discoNode.id());
if (node == null) {
continue;
@@ -184,9 +257,11 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}
/**
- * Builds a list of nodes and version
+ * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version
+ * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest
+ * version are always at the front of the list.
*/
- NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean recoveryOnAnyNode, Set<String> ignoreNodes,
+ NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
final Map<DiscoveryNode, Long> nodesWithVersion = new HashMap<>();
int numberOfAllocationsFound = 0;
@@ -208,20 +283,15 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
version = -1;
}
- if (recoveryOnAnyNode) {
- numberOfAllocationsFound++;
- if (version > highestVersion) {
- highestVersion = version;
- }
- // We always put the node without clearing the map
- nodesWithVersion.put(node, version);
- } else if (version != -1) {
+ if (version != -1) {
numberOfAllocationsFound++;
// If we've found a new "best" candidate, clear the
// current candidates and add it
if (version > highestVersion) {
highestVersion = version;
- nodesWithVersion.clear();
+ if (matchAnyShard == false) {
+ nodesWithVersion.clear();
+ }
nodesWithVersion.put(node, version);
} else if (version == highestVersion) {
// If the candidate is the same, add it to the
@@ -258,9 +328,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* Return {@code true} if the index is configured to allow shards to be
* recovered on any node
*/
- private boolean recoverOnAnyNode(Settings idxSettings) {
- return IndexMetaData.isOnSharedFilesystem(idxSettings) &&
- idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
+ private boolean recoverOnAnyNode(IndexSettings indexSettings) {
+ return indexSettings.isOnSharedFilesystem()
+ && indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
}
protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
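
To restate the selection rule that buildAllocationIdBasedNodes implements: copies whose allocation id appears in lastActiveAllocationIds always sort to the front, and copies with other ids are kept only when matchAnyShard is true. A simplified sketch with hypothetical types (node ids and allocation ids modeled as plain strings):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Simplified model of the candidate ordering: matching copies first,
// stale copies appended only when matchAnyShard is true.
final class CandidateOrdering {
    static List<String> order(Map<String, String> allocationIdByNode,
                              Set<String> lastActiveAllocationIds,
                              boolean matchAnyShard) {
        List<String> matching = new ArrayList<>();
        List<String> nonMatching = new ArrayList<>();
        for (Map.Entry<String, String> entry : allocationIdByNode.entrySet()) {
            if (lastActiveAllocationIds.contains(entry.getValue())) {
                matching.add(entry.getKey());
            } else if (matchAnyShard) {
                nonMatching.add(entry.getKey());
            }
        }
        List<String> ordered = new ArrayList<>(matching);
        ordered.addAll(nonMatching); // matching allocation ids stay at the front
        return ordered;
    }
}
```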
diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
index c87f4d9475..0b5f2bc58d 100644
--- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
+++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
@@ -24,6 +24,8 @@ import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
@@ -56,6 +58,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
*/
public boolean processExistingRecoveries(RoutingAllocation allocation) {
boolean changed = false;
+ MetaData metaData = allocation.metaData();
for (RoutingNodes.RoutingNodesIterator nodes = allocation.routingNodes().nodes(); nodes.hasNext(); ) {
nodes.next();
for (RoutingNodes.RoutingNodeIterator it = nodes.nodeShards(); it.hasNext(); ) {
@@ -69,8 +72,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
if (shard.relocatingNodeId() != null) {
continue;
}
+
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
- if (shard.allocatedPostIndexCreate() == false) {
+ IndexMetaData indexMetaData = metaData.index(shard.getIndex());
+ if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
continue;
}
@@ -114,6 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
+ MetaData metaData = allocation.metaData();
while (unassignedIterator.hasNext()) {
ShardRouting shard = unassignedIterator.next();
if (shard.primary()) {
@@ -121,7 +127,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
- if (shard.allocatedPostIndexCreate() == false) {
+ IndexMetaData indexMetaData = metaData.index(shard.getIndex());
+ if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
continue;
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
index d91b4bd8cd..539ac92426 100644
--- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
+++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java
@@ -139,7 +139,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
Store.tryOpenIndex(shardPath.resolveIndex());
} catch (Exception exception) {
logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
- return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception);
+ String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
+ return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, exception);
}
}
// old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata
@@ -149,11 +150,12 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID);
} else {
logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
- return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version);
+ String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
+ return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId);
}
}
logger.trace("{} no local shard info found", shardId);
- return new NodeGatewayStartedShards(clusterService.localNode(), -1);
+ return new NodeGatewayStartedShards(clusterService.localNode(), -1, null);
} catch (Exception e) {
throw new ElasticsearchException("failed to load started shards", e);
}
@@ -277,17 +279,19 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
public static class NodeGatewayStartedShards extends BaseNodeResponse {
private long version = -1;
+ private String allocationId = null;
private Throwable storeException = null;
public NodeGatewayStartedShards() {
}
- public NodeGatewayStartedShards(DiscoveryNode node, long version) {
- this(node, version, null);
+ public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId) {
+ this(node, version, allocationId, null);
}
- public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) {
+ public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, Throwable storeException) {
super(node);
this.version = version;
+ this.allocationId = allocationId;
this.storeException = storeException;
}
@@ -295,6 +299,10 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
return this.version;
}
+ public String allocationId() {
+ return this.allocationId;
+ }
+
public Throwable storeException() {
return this.storeException;
}
@@ -303,16 +311,17 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
version = in.readLong();
+ allocationId = in.readOptionalString();
if (in.readBoolean()) {
storeException = in.readThrowable();
}
-
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeLong(version);
+ out.writeOptionalString(allocationId);
if (storeException != null) {
out.writeBoolean(true);
out.writeThrowable(storeException);
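
The new allocationId field travels over the wire via the optional-string encoding (readOptionalString/writeOptionalString), which amounts to a presence flag followed by the value so that null round-trips safely against older nodes. A stand-alone sketch using plain java.io rather than the actual StreamInput/StreamOutput API:

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Minimal model of optional-string framing: one presence byte, then the value.
final class OptionalString {
    static void write(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null); // presence flag
        if (value != null) {
            out.writeUTF(value);
        }
    }

    static String read(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }
}
```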
diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java b/core/src/main/java/org/elasticsearch/http/HttpServerModule.java
deleted file mode 100644
index 49d6736964..0000000000
--- a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.http;
-
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.http.netty.NettyHttpServerTransport;
-
-import java.util.Objects;
-
-/**
- *
- */
-public class HttpServerModule extends AbstractModule {
-
- private final Settings settings;
- private final ESLogger logger;
-
- private Class<? extends HttpServerTransport> httpServerTransportClass;
-
- public HttpServerModule(Settings settings) {
- this.settings = settings;
- this.logger = Loggers.getLogger(getClass(), settings);
- this.httpServerTransportClass = NettyHttpServerTransport.class;
- }
-
- @SuppressWarnings({"unchecked"})
- @Override
- protected void configure() {
- bind(HttpServerTransport.class).to(httpServerTransportClass).asEagerSingleton();
- bind(HttpServer.class).asEagerSingleton();
- }
-
- public void setHttpServerTransport(Class<? extends HttpServerTransport> httpServerTransport, String source) {
- Objects.requireNonNull(httpServerTransport, "Configured http server transport may not be null");
- Objects.requireNonNull(source, "Plugin, that changes transport may not be null");
- logger.info("Using [{}] as http transport, overridden by [{}]", httpServerTransportClass.getName(), source);
- this.httpServerTransportClass = httpServerTransport;
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java
index 10008c76a5..4bcbf4079c 100644
--- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java
+++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java
@@ -1,5 +1,27 @@
package org.elasticsearch.http.netty.pipelining;
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// this file is from netty-http-pipelining, under apache 2.0 license
+// see github.com/typesafehub/netty-http-pipelining
+
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.jboss.netty.channel.*;
diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java
index 6b713a0802..622a3e6ac9 100644
--- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java
+++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java
@@ -1,5 +1,27 @@
package org.elasticsearch.http.netty.pipelining;
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// this file is from netty-http-pipelining, under apache 2.0 license
+// see github.com/typesafehub/netty-http-pipelining
+
import org.jboss.netty.channel.*;
/**
diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java
index 7343b29b6c..cc47b5be32 100644
--- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java
+++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java
@@ -1,5 +1,27 @@
package org.elasticsearch.http.netty.pipelining;
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+// this file is from netty-http-pipelining, under apache 2.0 license
+// see github.com/typesafehub/netty-http-pipelining
+
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.UpstreamMessageEvent;
diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index 92ca00231b..a6b66742c5 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -48,7 +47,13 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
-import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.index.shard.IndexSearcherWrapper;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShadowIndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardNotFoundException;
+import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
@@ -73,7 +78,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
/**
*
*/
-public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard>{
+public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> {
private final IndexEventListener eventListener;
private final AnalysisService analysisService;
@@ -93,7 +98,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
private final AtomicBoolean deleted = new AtomicBoolean(false);
private final IndexSettings indexSettings;
- @Inject
public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
SimilarityService similarityService,
ShardStoreDeleter shardStoreDeleter,
@@ -146,7 +150,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
*/
@Nullable
public IndexShard getShardOrNull(int shardId) {
- return shards.get(shardId);
+ return shards.get(shardId);
}
/**
@@ -160,13 +164,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
return indexShard;
}
- public Set<Integer> shardIds() { return shards.keySet(); }
+ public Set<Integer> shardIds() {
+ return shards.keySet();
+ }
public IndexCache cache() {
return indexCache;
}
- public IndexFieldDataService fieldData() { return indexFieldData; }
+ public IndexFieldDataService fieldData() {
+ return indexFieldData;
+ }
public AnalysisService analysisService() {
return this.analysisService;
@@ -207,7 +215,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
private long getAvgShardSizeInBytes() throws IOException {
long sum = 0;
int count = 0;
- for(IndexShard indexShard : this) {
+ for (IndexShard indexShard : this) {
sum += indexShard.store().stats().sizeInBytes();
count++;
}
@@ -254,17 +262,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
// TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
// that's being relocated/replicated we know how large it will become once it's done copying:
// Count up how many shards are currently on each data path:
- Map<Path,Integer> dataPathToShardCount = new HashMap<>();
- for(IndexShard shard : this) {
+ Map<Path, Integer> dataPathToShardCount = new HashMap<>();
+ for (IndexShard shard : this) {
Path dataPath = shard.shardPath().getRootStatePath();
Integer curCount = dataPathToShardCount.get(dataPath);
if (curCount == null) {
curCount = 0;
}
- dataPathToShardCount.put(dataPath, curCount+1);
+ dataPathToShardCount.put(dataPath, curCount + 1);
}
path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(),
- dataPathToShardCount);
+ dataPathToShardCount);
logger.debug("{} creating using a new path [{}]", shardId, path);
} else {
logger.debug("{} creating using an existing path [{}]", shardId, path);
@@ -277,7 +285,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
logger.debug("creating shard_id {}", shardId);
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
- (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
+ (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
if (useShadowEngine(primary, indexSettings)) {
indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider);
@@ -462,6 +470,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
}
}
+
/**
* Returns the filter associated with listed filtering aliases.
* <p>
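
The shard-counting loop above still spells out the get/null-check/put sequence; with Java 8's Map.merge the same per-data-path count collapses to a single line. A small sketch (the paths are illustrative):

```java
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DataPathCounts {
    public static void main(String[] args) {
        List<Path> shardPaths = Arrays.asList(
                Paths.get("/data/a"), Paths.get("/data/a"), Paths.get("/data/b"));
        Map<Path, Integer> dataPathToShardCount = new HashMap<>();
        for (Path dataPath : shardPaths) {
            // merge() replaces the explicit get/null-check/put sequence
            dataPathToShardCount.merge(dataPath, 1, Integer::sum);
        }
        System.out.println(dataPathToShardCount); // {/data/a=2, /data/b=1}
    }
}
```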
diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 1404b61b8e..de13eb1097 100644
--- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -781,10 +781,14 @@ public class InternalEngine extends Engine {
// we need to fail the engine. it might have already been failed before
// but we are double-checking it's failed and closed
if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
- failEngine("already closed by tragic event", indexWriter.getTragicException());
+ failEngine("already closed by tragic event on the index writer", indexWriter.getTragicException());
+ } else if (translog.isOpen() == false && translog.getTragicException() != null) {
+ failEngine("already closed by tragic event on the translog", translog.getTragicException());
}
return true;
- } else if (t != null && indexWriter.isOpen() == false && indexWriter.getTragicException() == t) {
+ } else if (t != null &&
+ ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t)
+ || (translog.isOpen() == false && translog.getTragicException() == t))) {
// this is spot on - we are handling the tragic event exception here, so we have to fail the engine
// right away
failEngine(source, t);
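
The InternalEngine change widens the tragic-event handling from the index writer alone to the translog as well: the engine must fail when either component was closed by the same fatal exception. Reduced to its boolean core (a hypothetical helper, not the engine's actual structure):

```java
// The engine fails if the exception in hand is the one that fatally
// closed either the index writer or the translog.
final class TragicEventCheck {
    static boolean mustFailEngine(Throwable t,
                                  boolean writerOpen, Throwable writerTragedy,
                                  boolean translogOpen, Throwable translogTragedy) {
        return t != null
                && ((writerOpen == false && writerTragedy == t)
                    || (translogOpen == false && translogTragedy == t));
    }
}
```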
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java
index 47c4372016..54c6ef20e3 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java
@@ -19,16 +19,9 @@
package org.elasticsearch.index.mapper;
-public class ContentPath {
+public final class ContentPath {
- public enum Type {
- JUST_NAME,
- FULL,
- }
-
- private Type pathType;
-
- private final char delimiter;
+ private static final char DELIMITER = '.';
private final StringBuilder sb;
@@ -47,7 +40,6 @@ public class ContentPath {
* number of path elements to not be included in {@link #pathAsText(String)}.
*/
public ContentPath(int offset) {
- this.delimiter = '.';
this.sb = new StringBuilder();
this.offset = offset;
reset();
@@ -71,26 +63,11 @@ public class ContentPath {
}
public String pathAsText(String name) {
- if (pathType == Type.JUST_NAME) {
- return name;
- }
- return fullPathAsText(name);
- }
-
- public String fullPathAsText(String name) {
sb.setLength(0);
for (int i = offset; i < index; i++) {
- sb.append(path[i]).append(delimiter);
+ sb.append(path[i]).append(DELIMITER);
}
sb.append(name);
return sb.toString();
}
-
- public Type pathType() {
- return pathType;
- }
-
- public void pathType(Type type) {
- this.pathType = type;
- }
}
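
After this simplification, ContentPath has exactly one behavior: element names are joined with the fixed '.' delimiter, since the JUST_NAME path type is gone. A runnable sketch of the resulting contract:

```java
public class PathAsText {
    public static void main(String[] args) {
        String[] path = {"user", "address"};
        StringBuilder sb = new StringBuilder();
        for (String element : path) {
            sb.append(element).append('.'); // the delimiter is now a constant
        }
        sb.append("zip");
        System.out.println(sb); // user.address.zip
    }
}
```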
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
index c4fec8cf09..333cda459f 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java
@@ -29,7 +29,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringAndBytesText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -113,11 +114,11 @@ public class DocumentMapper implements ToXContent {
private final MapperService mapperService;
private final String type;
- private final StringAndBytesText typeText;
+ private final Text typeText;
private volatile CompressedXContent mappingSource;
- private final Mapping mapping;
+ private volatile Mapping mapping;
private final DocumentParser documentParser;
@@ -137,7 +138,7 @@ public class DocumentMapper implements ToXContent {
ReentrantReadWriteLock mappingLock) {
this.mapperService = mapperService;
this.type = rootObjectMapper.name();
- this.typeText = new StringAndBytesText(this.type);
+ this.typeText = new Text(this.type);
this.mapping = new Mapping(
Version.indexCreated(indexSettings),
rootObjectMapper,
@@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent {
mapperService.addMappers(type, objectMappers, fieldMappers);
}
- public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
+ public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) {
try (ReleasableLock lock = mappingWriteLock.acquire()) {
mapperService.checkMappersCompatibility(type, mapping, updateAllTypes);
- final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes);
- this.mapping.merge(mapping, mergeResult);
+ // do the merge even when simulate == true so that conflicts still surface as exceptions
+ Mapping merged = this.mapping.merge(mapping, updateAllTypes);
if (simulate == false) {
- addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes);
+ this.mapping = merged;
+ Collection<ObjectMapper> objectMappers = new ArrayList<>();
+ Collection<FieldMapper> fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers));
+ MapperUtils.collect(merged.root, objectMappers, fieldMappers);
+ addMappers(objectMappers, fieldMappers, updateAllTypes);
refreshSource();
}
- return mergeResult;
}
}
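
The reworked DocumentMapper.merge drops the MergeResult plumbing: the merge always runs, so conflicts surface as exceptions even during a simulation, and the merged mapping is only published when simulate is false. A hedged sketch of that flow with hypothetical names:

```java
// Simplified model: merge() returns a new immutable object or throws;
// the caller decides whether to publish it.
final class MergeFlow {
    static final class Mapping {
        final String source;
        Mapping(String source) { this.source = source; }

        Mapping merge(Mapping other) {
            if (other.source == null) {
                throw new IllegalArgumentException("merge conflict");
            }
            return new Mapping(other.source); // both inputs stay unmodified
        }
    }

    private volatile Mapping mapping = new Mapping("{}");

    void merge(Mapping incoming, boolean simulate) {
        Mapping merged = mapping.merge(incoming); // throws even when simulating
        if (simulate == false) {
            mapping = merged; // publish the new immutable mapping
        }
    }
}
```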
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index b0ad972d57..bb1749d233 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -234,9 +234,6 @@ class DocumentParser implements Closeable {
nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE));
}
- ContentPath.Type origPathType = context.path().pathType();
- context.path().pathType(mapper.pathType());
-
// if we are at the end of the previous object, advance
if (token == XContentParser.Token.END_OBJECT) {
token = parser.nextToken();
@@ -267,12 +264,11 @@ class DocumentParser implements Closeable {
if (update == null) {
update = newUpdate;
} else {
- MapperUtils.merge(update, newUpdate);
+ update = update.merge(newUpdate, false);
}
}
}
- // restore the enable path flag
- context.path().pathType(origPathType);
if (nested.isNested()) {
ParseContext.Document nestedDoc = context.doc();
ParseContext.Document parentDoc = nestedDoc.getParent();
@@ -341,7 +337,7 @@ class DocumentParser implements Closeable {
context.path().remove();
Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object");
if (builder == null) {
- builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType());
+ builder = MapperBuilders.object(currentFieldName).enabled(true);
// if this is a non root object, then explicitly set the dynamic behavior if set
if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(mapper.dynamic());
@@ -610,7 +606,7 @@ class DocumentParser implements Closeable {
return null;
}
final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
- final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName));
+ final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().pathAsText(currentFieldName));
Mapper.Builder builder = null;
if (existingFieldType != null) {
// create a builder of the same type
@@ -695,7 +691,7 @@ class DocumentParser implements Closeable {
if (paths.length > 1) {
ObjectMapper parent = context.root();
for (int i = 0; i < paths.length-1; i++) {
- mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i]));
+ mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i]));
if (mapper == null) {
// One mapping is missing, check if we are allowed to create a dynamic one.
ObjectMapper.Dynamic dynamic = parent.dynamic();
@@ -713,12 +709,12 @@ class DocumentParser implements Closeable {
if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
}
- builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType());
+ builder = MapperBuilders.object(paths[i]).enabled(true);
}
Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
mapper = (ObjectMapper) builder.build(builderContext);
if (mapper.nested() != ObjectMapper.Nested.NO) {
- throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`");
+ throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`");
}
break;
case FALSE:
@@ -759,7 +755,7 @@ class DocumentParser implements Closeable {
private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException {
final Mapper update = parseObjectOrField(context, mapper);
if (update != null) {
- MapperUtils.merge(mapper, update);
+ mapper = (M) mapper.merge(update, false);
}
return mapper;
}
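
DocumentParser now accumulates dynamic mapping updates by replacement instead of in-place mutation: each merge returns a fresh mapper, and the loop swaps the local reference. A sketch of the pattern, with Node as a hypothetical stand-in for Mapper:

```java
import java.util.List;

interface Node {
    Node merge(Node other); // returns a new object, mutates neither input
}

final class UpdateAccumulator {
    static Node accumulate(List<Node> newUpdates) {
        Node update = null;
        for (Node newUpdate : newUpdates) {
            // replace the reference with the merged copy
            update = (update == null) ? newUpdate : update.merge(newUpdate);
        }
        return update;
    }
}
```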
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
index ced3f08b22..30df3562ae 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
@@ -47,7 +47,7 @@ import java.util.List;
import java.util.Locale;
import java.util.stream.StreamSupport;
-public abstract class FieldMapper extends Mapper {
+public abstract class FieldMapper extends Mapper implements Cloneable {
public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> {
@@ -64,10 +64,10 @@ public abstract class FieldMapper extends Mapper {
protected final MultiFields.Builder multiFieldsBuilder;
protected CopyTo copyTo;
- protected Builder(String name, MappedFieldType fieldType) {
+ protected Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) {
super(name);
this.fieldType = fieldType.clone();
- this.defaultFieldType = fieldType.clone();
+ this.defaultFieldType = defaultFieldType.clone();
this.defaultOptions = fieldType.indexOptions(); // we have to store it since the fieldType is mutable
multiFieldsBuilder = new MultiFields.Builder();
}
@@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper {
* if the fieldType has a non-null option we are all good it might have been set through a different
* call.
*/
- final IndexOptions options = getDefaultIndexOption();
- assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing";
+ IndexOptions options = getDefaultIndexOption();
+ if (options == IndexOptions.NONE) {
+ // can happen when an existing type on the same index has disabled indexing
+ // since we inherit the default field type from the first mapper that is
+ // created on an index
+ throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index");
+ }
fieldType.setIndexOptions(options);
}
} else {
@@ -202,11 +207,6 @@ public abstract class FieldMapper extends Mapper {
return this;
}
- public T multiFieldPathType(ContentPath.Type pathType) {
- multiFieldsBuilder.pathType(pathType);
- return builder;
- }
-
public T addMultiField(Mapper.Builder mapperBuilder) {
multiFieldsBuilder.add(mapperBuilder);
return builder;
@@ -237,7 +237,7 @@ public abstract class FieldMapper extends Mapper {
}
protected String buildFullName(BuilderContext context) {
- return context.path().fullPathAsText(name);
+ return context.path().pathAsText(name);
}
protected void setupFieldType(BuilderContext context) {
@@ -270,7 +270,7 @@ public abstract class FieldMapper extends Mapper {
protected MappedFieldTypeReference fieldTypeRef;
protected final MappedFieldType defaultFieldType;
- protected final MultiFields multiFields;
+ protected MultiFields multiFields;
protected CopyTo copyTo;
protected final boolean indexCreatedBefore2x;
@@ -359,26 +359,41 @@ public abstract class FieldMapper extends Mapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected FieldMapper clone() {
+ try {
+ return (FieldMapper) super.clone();
+ } catch (CloneNotSupportedException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ @Override
+ public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
+ FieldMapper merged = clone();
+ merged.doMerge(mergeWith, updateAllTypes);
+ return merged;
+ }
+
+ /**
+ * Merge changes coming from {@code mergeWith} in place.
+ * @param updateAllTypes whether changes to shared field types should also be applied to the same fields of other types in the index
+ */
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
if (!this.getClass().equals(mergeWith.getClass())) {
String mergedType = mergeWith.getClass().getSimpleName();
if (mergeWith instanceof FieldMapper) {
mergedType = ((FieldMapper) mergeWith).contentType();
}
- mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
- // different types, return
- return;
+ throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]");
}
FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
- multiFields.merge(mergeWith, mergeResult);
+ multiFields = multiFields.merge(fieldMergeWith.multiFields);
- if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
- // apply changeable values
- MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
- fieldType.freeze();
- fieldTypeRef.set(fieldType);
- this.copyTo = fieldMergeWith.copyTo;
- }
+ // apply changeable values
+ MappedFieldType fieldType = fieldMergeWith.fieldType().clone();
+ fieldType.freeze();
+ fieldTypeRef.set(fieldType);
+ this.copyTo = fieldMergeWith.copyTo;
}
@Override
@@ -520,18 +535,12 @@ public abstract class FieldMapper extends Mapper {
public static class MultiFields {
public static MultiFields empty() {
- return new MultiFields(ContentPath.Type.FULL, ImmutableOpenMap.<String, FieldMapper>of());
+ return new MultiFields(ImmutableOpenMap.<String, FieldMapper>of());
}
public static class Builder {
private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder();
- private ContentPath.Type pathType = ContentPath.Type.FULL;
-
- public Builder pathType(ContentPath.Type pathType) {
- this.pathType = pathType;
- return this;
- }
public Builder add(Mapper.Builder builder) {
mapperBuilders.put(builder.name(), builder);
@@ -540,13 +549,9 @@ public abstract class FieldMapper extends Mapper {
@SuppressWarnings("unchecked")
public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) {
- if (pathType == ContentPath.Type.FULL && mapperBuilders.isEmpty()) {
+ if (mapperBuilders.isEmpty()) {
return empty();
- } else if (mapperBuilders.isEmpty()) {
- return new MultiFields(pathType, ImmutableOpenMap.<String, FieldMapper>of());
} else {
- ContentPath.Type origPathType = context.path().pathType();
- context.path().pathType(pathType);
context.path().add(mainFieldBuilder.name());
ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders;
for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) {
@@ -557,26 +562,25 @@ public abstract class FieldMapper extends Mapper {
mapperBuilders.put(key, mapper);
}
context.path().remove();
- context.path().pathType(origPathType);
ImmutableOpenMap.Builder<String, FieldMapper> mappers = mapperBuilders.cast();
- return new MultiFields(pathType, mappers.build());
+ return new MultiFields(mappers.build());
}
}
}
- private final ContentPath.Type pathType;
- private volatile ImmutableOpenMap<String, FieldMapper> mappers;
+ private final ImmutableOpenMap<String, FieldMapper> mappers;
- public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, FieldMapper> mappers) {
- this.pathType = pathType;
- this.mappers = mappers;
+ private MultiFields(ImmutableOpenMap<String, FieldMapper> mappers) {
+ ImmutableOpenMap.Builder<String, FieldMapper> builder = new ImmutableOpenMap.Builder<>();
// we disable inclusion in the _all field for multi-field mappers
- for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
+ for (ObjectObjectCursor<String, FieldMapper> cursor : mappers) {
FieldMapper mapper = cursor.value;
if (mapper instanceof AllFieldMapper.IncludeInAll) {
- ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
+ mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
}
+ builder.put(cursor.key, mapper);
}
+ this.mappers = builder.build();
}
public void parse(FieldMapper mainField, ParseContext context) throws IOException {
@@ -587,58 +591,33 @@ public abstract class FieldMapper extends Mapper {
context = context.createMultiFieldContext();
- ContentPath.Type origPathType = context.path().pathType();
- context.path().pathType(pathType);
-
context.path().add(mainField.simpleName());
for (ObjectCursor<FieldMapper> cursor : mappers.values()) {
cursor.value.parse(context);
}
context.path().remove();
- context.path().pathType(origPathType);
}
- // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- FieldMapper mergeWithMultiField = (FieldMapper) mergeWith;
-
- List<FieldMapper> newFieldMappers = null;
- ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = null;
+ public MultiFields merge(MultiFields mergeWith) {
+ ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = ImmutableOpenMap.builder(mappers);
- for (ObjectCursor<FieldMapper> cursor : mergeWithMultiField.multiFields.mappers.values()) {
+ for (ObjectCursor<FieldMapper> cursor : mergeWith.mappers.values()) {
FieldMapper mergeWithMapper = cursor.value;
- Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
+ FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
if (mergeIntoMapper == null) {
- // no mapping, simply add it if not simulating
- if (!mergeResult.simulate()) {
- // we disable the all in multi-field mappers
- if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
- ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
- }
- if (newMappersBuilder == null) {
- newMappersBuilder = ImmutableOpenMap.builder(mappers);
- }
- newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
- if (mergeWithMapper instanceof FieldMapper) {
- if (newFieldMappers == null) {
- newFieldMappers = new ArrayList<>(2);
- }
- newFieldMappers.add(mergeWithMapper);
- }
+ // we disable inclusion in the _all field for multi-field mappers
+ if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) {
+ mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll();
}
+ newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
} else {
- mergeIntoMapper.merge(mergeWithMapper, mergeResult);
+ FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
+ newMappersBuilder.put(merged.simpleName(), merged); // override previous definition
}
}
- // first add all field mappers
- if (newFieldMappers != null) {
- mergeResult.addFieldMappers(newFieldMappers);
- }
- // now publish mappers
- if (newMappersBuilder != null) {
- mappers = newMappersBuilder.build();
- }
+ ImmutableOpenMap<String, FieldMapper> mappers = newMappersBuilder.build();
+ return new MultiFields(mappers);
}
public Iterator<Mapper> iterator() {
@@ -646,9 +625,6 @@ public abstract class FieldMapper extends Mapper {
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- if (pathType != ContentPath.Type.FULL) {
- builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
- }
if (!mappers.isEmpty()) {
// sort the mappers so we get consistent serialization format
Mapper[] sortedMappers = mappers.values().toArray(Mapper.class);
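
The rewritten MultiFields.merge builds a brand-new map rather than publishing into a volatile field: it starts from the current mappers, merges entries present on both sides (the merged entry overrides the previous definition), and adds the rest. A simplified sketch with Node standing in for FieldMapper:

```java
import java.util.HashMap;
import java.util.Map;

final class MultiFieldMerge {
    interface Node {
        Node merge(Node other);
    }

    static Map<String, Node> merge(Map<String, Node> current, Map<String, Node> incoming) {
        Map<String, Node> result = new HashMap<>(current);
        for (Map.Entry<String, Node> entry : incoming.entrySet()) {
            Node existing = current.get(entry.getKey());
            // merged entries override the previous definition
            result.put(entry.getKey(),
                    existing == null ? entry.getValue() : existing.merge(entry.getValue()));
        }
        return result;
    }
}
```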
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
index 33a4dabd3b..4c3aa3c56b 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
/** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */
public abstract String name();
- public abstract void merge(Mapper mergeWith, MergeResult mergeResult);
+ /** Return the merge of {@code mergeWith} into this.
+ * Both {@code this} and {@code mergeWith} will be left unmodified. */
+ public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes);
}
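
The contract stated above (both this and mergeWith are left unmodified) is what the clone-then-doMerge idiom in FieldMapper delivers: merge() copies the receiver and mutates only the copy, so a merge that throws part-way leaves the original usable. A minimal sketch of the idiom:

```java
abstract class ImmutableMergeable implements Cloneable {
    @Override
    protected ImmutableMergeable clone() {
        try {
            return (ImmutableMergeable) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // cannot happen: Cloneable is implemented
        }
    }

    final ImmutableMergeable merge(ImmutableMergeable mergeWith) {
        ImmutableMergeable merged = clone();
        merged.doMerge(mergeWith); // may throw; `this` stays untouched
        return merged;
    }

    protected abstract void doMerge(ImmutableMergeable mergeWith);
}
```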
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index 938f610d6d..37e99e8c90 100755
--- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.search.Queries;
@@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock());
private volatile FieldTypeLookup fieldTypes;
- private volatile ImmutableOpenMap<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.of();
+ private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>();
private boolean hasNested = false; // updated dynamically to true when a nested object is added
private final DocumentMapperParser documentParser;
@@ -199,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
if (DEFAULT_MAPPING.equals(type)) {
// verify we can parse it
+ // NOTE: never apply the default here
DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
// still add it as a document mapper so we have it registered and, for example, persisted back into
// the cluster meta data if needed, or checked for existence
@@ -212,74 +212,69 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
return mapper;
} else {
- return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
+ try (ReleasableLock lock = mappingWriteLock.acquire()) {
+ // only apply the default mapping if we don't have the type yet
+ applyDefault &= mappers.containsKey(type) == false;
+ return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
+ }
}
}
// never expose this to the outside world, we need to reparse the doc mapper so we get fresh
// instances of field mappers to properly remove existing doc mapper
private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) {
- try (ReleasableLock lock = mappingWriteLock.acquire()) {
- if (mapper.type().length() == 0) {
- throw new InvalidTypeNameException("mapping type name is empty");
- }
- if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
- throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
- }
- if (mapper.type().charAt(0) == '_') {
- throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
- }
- if (mapper.type().contains("#")) {
- throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
- }
- if (mapper.type().contains(",")) {
- throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
+ if (mapper.type().length() == 0) {
+ throw new InvalidTypeNameException("mapping type name is empty");
+ }
+ if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
+ }
+ if (mapper.type().charAt(0) == '_') {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
+ }
+ if (mapper.type().contains("#")) {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
+ }
+ if (mapper.type().contains(",")) {
+ throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
+ }
+ if (mapper.type().equals(mapper.parentFieldMapper().type())) {
+ throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
+ }
+ if (typeNameStartsWithIllegalDot(mapper)) {
+ if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
+ throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
+ } else {
+ logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
}
- if (mapper.type().equals(mapper.parentFieldMapper().type())) {
- throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
+ }
+ // we can add new field/object mappers while the old ones are there
+ // since we get new instances of those, and when we remove, we remove
+ // by instance equality
+ DocumentMapper oldMapper = mappers.get(mapper.type());
+
+ if (oldMapper != null) {
+ oldMapper.merge(mapper.mapping(), false, updateAllTypes);
+ return oldMapper;
+ } else {
+ Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
+ mapper.type(), mapper.mapping(), updateAllTypes);
+ Collection<ObjectMapper> newObjectMappers = newMappers.v1();
+ Collection<FieldMapper> newFieldMappers = newMappers.v2();
+ addMappers(mapper.type(), newObjectMappers, newFieldMappers);
+
+ for (DocumentTypeListener typeListener : typeListeners) {
+ typeListener.beforeCreate(mapper);
}
- if (typeNameStartsWithIllegalDot(mapper)) {
- if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
- throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
- } else {
- logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
- }
- }
- // we can add new field/object mappers while the old ones are there
- // since we get new instances of those, and when we remove, we remove
- // by instance equality
- DocumentMapper oldMapper = mappers.get(mapper.type());
-
- if (oldMapper != null) {
- // simulate first
- MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes);
- if (result.hasConflicts()) {
- throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}");
- }
- // then apply for real
- result = oldMapper.merge(mapper.mapping(), false, updateAllTypes);
- assert result.hasConflicts() == false; // we already simulated
- return oldMapper;
- } else {
- Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
- mapper.type(), mapper.mapping(), updateAllTypes);
- Collection<ObjectMapper> newObjectMappers = newMappers.v1();
- Collection<FieldMapper> newFieldMappers = newMappers.v2();
- addMappers(mapper.type(), newObjectMappers, newFieldMappers);
-
- for (DocumentTypeListener typeListener : typeListeners) {
- typeListener.beforeCreate(mapper);
- }
- mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
- if (mapper.parentFieldMapper().active()) {
- Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
- newParentTypes.addAll(parentTypes);
- newParentTypes.add(mapper.parentFieldMapper().type());
- parentTypes = unmodifiableSet(newParentTypes);
- }
- assert assertSerialization(mapper);
- return mapper;
+ mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
+ if (mapper.parentFieldMapper().active()) {
+ Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
+ newParentTypes.addAll(parentTypes);
+ newParentTypes.add(mapper.parentFieldMapper().type());
+ parentTypes = unmodifiableSet(newParentTypes);
}
+ assert assertSerialization(mapper);
+ return mapper;
}
}
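
[Editor's note] The rewrite above moves lock acquisition out of the private merge: the public merge(String, ...) entry point takes the mapping write lock once, decides whether to apply the default mapping, and then validates and registers inside a single critical section, with conflicts reported as exceptions rather than through a simulate pass. A minimal, self-contained sketch of that shape (the class, field, and method names below are hypothetical stand-ins, not the MapperService API):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Hypothetical sketch: validate-then-register under one write lock;
    // conflicts surface as exceptions instead of a simulate/apply pair.
    class TypeRegistry {
        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
        private volatile Map<String, String> mappings = new HashMap<>();

        String merge(String type, String mapping) {
            lock.writeLock().lock();
            try {
                if (type.isEmpty()) {
                    throw new IllegalArgumentException("mapping type name is empty");
                }
                String existing = mappings.get(type);
                if (existing != null) {
                    return existing + "+" + mapping; // merge into the existing entry
                }
                Map<String, String> copy = new HashMap<>(mappings);
                copy.put(type, mapping);
                mappings = copy; // publish a fresh snapshot
                return mapping;
            } finally {
                lock.writeLock().unlock();
            }
        }
    }
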
@@ -300,19 +295,56 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
return true;
}
+ private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
+ final Set<String> objectFullNames = new HashSet<>();
+ for (ObjectMapper objectMapper : objectMappers) {
+ final String fullPath = objectMapper.fullPath();
+ if (objectFullNames.add(fullPath) == false) {
+ throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]");
+ }
+ }
+
+ if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) {
+ // Before 3.0, some metadata mappers were also registered under the root object mapper,
+ // so we avoid false positives by deduplicating mappers by instance identity.
+ // Because the check is by identity rather than equality, it still catches a mapper
+ // that is separately defined under the root object.
+ Collection<FieldMapper> uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>());
+ uniqueFieldMappers.addAll(fieldMappers);
+ fieldMappers = uniqueFieldMappers;
+ }
+
+ final Set<String> fieldNames = new HashSet<>();
+ for (FieldMapper fieldMapper : fieldMappers) {
+ final String name = fieldMapper.name();
+ if (objectFullNames.contains(name)) {
+ throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]");
+ } else if (fieldNames.add(name) == false) {
+ throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]");
+ }
+ }
+ }
+
protected void checkMappersCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
assert mappingLock.isWriteLockedByCurrentThread();
+
+ checkFieldUniqueness(type, objectMappers, fieldMappers);
+
for (ObjectMapper newObjectMapper : objectMappers) {
ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
if (existingObjectMapper != null) {
- MergeResult result = new MergeResult(true, updateAllTypes);
- existingObjectMapper.merge(newObjectMapper, result);
- if (result.hasConflicts()) {
- throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" +
- Arrays.toString(result.buildConflicts()));
- }
+ // simulate a merge and ignore the result, we are just interested
+ // in exceptions here
+ existingObjectMapper.merge(newObjectMapper, updateAllTypes);
}
}
+
+ for (FieldMapper fieldMapper : fieldMappers) {
+ if (fullPathObjectMappers.containsKey(fieldMapper.name())) {
+ throw new IllegalArgumentException("Field [" + fieldMapper.name() + "] is defined as a field in mapping [" + type + "] but this name is already used for an object in other types");
+ }
+ }
+
fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes);
}
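
[Editor's note] checkFieldUniqueness collapses duplicates by object identity before comparing names, because pre-3.0 indices can legitimately register the same metadata mapper instance twice under the root object. A self-contained sketch of that check, with a hypothetical Field stand-in for FieldMapper:

    import java.util.Collection;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.IdentityHashMap;
    import java.util.Set;

    class UniquenessCheck {
        // Minimal stand-in for FieldMapper.
        static final class Field {
            final String name;
            Field(String name) { this.name = name; }
        }

        static void check(String type, Set<String> objectPaths, Collection<Field> fieldMappers) {
            // The same instance registered twice is collapsed; two distinct
            // instances with the same name still conflict below.
            Collection<Field> unique = Collections.newSetFromMap(new IdentityHashMap<>());
            unique.addAll(fieldMappers);
            Set<String> names = new HashSet<>();
            for (Field mapper : unique) {
                if (objectPaths.contains(mapper.name)) {
                    throw new IllegalArgumentException("Field [" + mapper.name + "] is defined both as an object and a field in [" + type + "]");
                } else if (names.add(mapper.name) == false) {
                    throw new IllegalArgumentException("Field [" + mapper.name + "] is defined twice in [" + type + "]");
                }
            }
        }
    }
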
@@ -320,9 +352,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
String type, Mapping mapping, boolean updateAllTypes) {
List<ObjectMapper> objectMappers = new ArrayList<>();
List<FieldMapper> fieldMappers = new ArrayList<>();
- for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) {
- fieldMappers.add(metadataMapper);
- }
+ Collections.addAll(fieldMappers, mapping.metadataMappers);
MapperUtils.collect(mapping.root, objectMappers, fieldMappers);
checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes);
return new Tuple<>(objectMappers, fieldMappers);
@@ -330,14 +360,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
assert mappingLock.isWriteLockedByCurrentThread();
- ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers);
+ Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers);
for (ObjectMapper objectMapper : objectMappers) {
fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper);
if (objectMapper.nested().isNested()) {
hasNested = true;
}
}
- this.fullPathObjectMappers = fullPathObjectMappers.build();
+ this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers);
this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers);
}
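
[Editor's note] addMappers switches fullPathObjectMappers from ImmutableOpenMap to a plain java.util map published through a volatile field: writers copy, mutate the copy under the write lock, and publish an unmodifiable view, so readers always dereference a complete snapshot without locking. The pattern in isolation (hypothetical names):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class PathRegistry {
        // Readers see a complete, immutable snapshot via the volatile read.
        private volatile Map<String, Object> byPath = Collections.emptyMap();

        // synchronized stands in for the mapping write lock held by callers.
        synchronized void add(String path, Object mapper) {
            Map<String, Object> copy = new HashMap<>(byPath);
            copy.put(path, mapper);
            byPath = Collections.unmodifiableMap(copy);
        }

        Object get(String path) {
            return byPath.get(path);
        }
    }
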
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java
index d46c32a932..04508827f7 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java
@@ -27,52 +27,6 @@ import java.util.Collection;
public enum MapperUtils {
;
- private static MergeResult newStrictMergeResult() {
- return new MergeResult(false, false) {
-
- @Override
- public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
- // no-op
- }
-
- @Override
- public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
- // no-op
- }
-
- @Override
- public Collection<FieldMapper> getNewFieldMappers() {
- throw new UnsupportedOperationException("Strict merge result does not support new field mappers");
- }
-
- @Override
- public Collection<ObjectMapper> getNewObjectMappers() {
- throw new UnsupportedOperationException("Strict merge result does not support new object mappers");
- }
-
- @Override
- public void addConflict(String mergeFailure) {
- throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure);
- }
- };
- }
-
- /**
- * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
- * merges mappings, not lookup structures. Conflicts are returned as exceptions.
- */
- public static void merge(Mapper mergeInto, Mapper mergeWith) {
- mergeInto.merge(mergeWith, newStrictMergeResult());
- }
-
- /**
- * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only
- * merges mappings, not lookup structures. Conflicts are returned as exceptions.
- */
- public static void merge(Mapping mergeInto, Mapping mergeWith) {
- mergeInto.merge(mergeWith, newStrictMergeResult());
- }
-
/** Split mapper and its descendants into object and field mappers. */
public static void collect(Mapper mapper, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) {
if (mapper instanceof RootObjectMapper) {
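
[Editor's note] With the strict MergeResult gone, MapperUtils no longer needs merge helpers: callers invoke Mapper#merge directly, conflicts propagate as exceptions, and the inputs stay untouched. A sketch of that exception-based, non-mutating merge contract (Node is a hypothetical stand-in for Mapper):

    final class Node {
        final String name;
        final String type;
        Node(String name, String type) { this.name = name; this.type = type; }

        // Returns a merged copy; throws on conflict; never mutates either input.
        Node merge(Node mergeWith) {
            if (name.equals(mergeWith.name) == false) {
                throw new IllegalArgumentException("cannot merge [" + mergeWith.name + "] into [" + name + "]");
            }
            if (type.equals(mergeWith.type) == false) {
                throw new IllegalArgumentException("mapper [" + name + "] has different [type]");
            }
            return new Node(name, type);
        }
    }
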
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java
index bac4216255..d33a97a415 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java
@@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
-import java.util.List;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
@@ -41,25 +43,27 @@ import static java.util.Collections.unmodifiableMap;
*/
public final class Mapping implements ToXContent {
- public static final List<String> LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl");
+ // Set of metadata fields that were included in the root object mapper before 2.0
+ public static final Set<String> LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>(
+ Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl")));
final Version indexCreated;
final RootObjectMapper root;
final MetadataFieldMapper[] metadataMappers;
final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap;
- volatile Map<String, Object> meta;
+ final Map<String, Object> meta;
public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) {
this.indexCreated = indexCreated;
- this.root = rootObjectMapper;
this.metadataMappers = metadataMappers;
Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>();
for (MetadataFieldMapper metadataMapper : metadataMappers) {
if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) {
- root.putMapper(metadataMapper);
+ rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper);
}
metadataMappersMap.put(metadataMapper.getClass(), metadataMapper);
}
+ this.root = rootObjectMapper;
// keep root mappers sorted for consistent serialization
Arrays.sort(metadataMappers, new Comparator<Mapper>() {
@Override
@@ -90,21 +94,20 @@ public final class Mapping implements ToXContent {
}
/** @see DocumentMapper#merge(Mapping, boolean, boolean) */
- public void merge(Mapping mergeWith, MergeResult mergeResult) {
- assert metadataMappers.length == mergeWith.metadataMappers.length;
-
- root.merge(mergeWith.root, mergeResult);
- for (MetadataFieldMapper metadataMapper : metadataMappers) {
- MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass());
- if (mergeWithMetadataMapper != null) {
- metadataMapper.merge(mergeWithMetadataMapper, mergeResult);
+ public Mapping merge(Mapping mergeWith, boolean updateAllTypes) {
+ RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes);
+ Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap);
+ for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) {
+ MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass());
+ MetadataFieldMapper merged;
+ if (mergeInto == null) {
+ merged = metaMergeWith;
+ } else {
+ merged = mergeInto.merge(metaMergeWith, updateAllTypes);
}
+ mergedMetaDataMappers.put(merged.getClass(), merged);
}
-
- if (mergeResult.simulate() == false) {
- // let the merge with attributes to override the attributes
- meta = mergeWith.meta;
- }
+ return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta);
}
@Override
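
[Editor's note] Mapping#merge now returns a new Mapping instead of filling a MergeResult: it merges the roots, then folds the incoming metadata mappers into a map keyed by mapper class, so each metadata mapper is merged with its counterpart exactly once. The class-keyed fold in isolation (hypothetical types; the real code delegates to MetadataFieldMapper#merge):

    import java.util.HashMap;
    import java.util.Map;

    class MetaMerge {
        static Object[] merge(Object[] existing, Object[] incoming) {
            Map<Class<?>, Object> merged = new HashMap<>();
            for (Object mapper : existing) {
                merged.put(mapper.getClass(), mapper);
            }
            for (Object mergeWith : incoming) {
                Object mergeInto = merged.get(mergeWith.getClass());
                // New metadata mappers are adopted as-is; existing ones are merged.
                merged.put(mergeWith.getClass(), mergeInto == null ? mergeWith : mergeOne(mergeInto, mergeWith));
            }
            return merged.values().toArray();
        }

        static Object mergeOne(Object mergeInto, Object mergeWith) {
            return mergeWith; // placeholder for MetadataFieldMapper#merge
        }
    }
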
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java b/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java
deleted file mode 100644
index f5698a0ed1..0000000000
--- a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.mapper;
-
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.index.mapper.object.ObjectMapper;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-
-/** A container for tracking results of a mapping merge. */
-public class MergeResult {
-
- private final boolean simulate;
- private final boolean updateAllTypes;
-
- private final List<String> conflicts = new ArrayList<>();
- private final List<FieldMapper> newFieldMappers = new ArrayList<>();
- private final List<ObjectMapper> newObjectMappers = new ArrayList<>();
-
- public MergeResult(boolean simulate, boolean updateAllTypes) {
- this.simulate = simulate;
- this.updateAllTypes = updateAllTypes;
- }
-
- public void addFieldMappers(Collection<FieldMapper> fieldMappers) {
- assert simulate() == false;
- newFieldMappers.addAll(fieldMappers);
- }
-
- public void addObjectMappers(Collection<ObjectMapper> objectMappers) {
- assert simulate() == false;
- newObjectMappers.addAll(objectMappers);
- }
-
- public Collection<FieldMapper> getNewFieldMappers() {
- return newFieldMappers;
- }
-
- public Collection<ObjectMapper> getNewObjectMappers() {
- return newObjectMappers;
- }
-
- public boolean simulate() {
- return simulate;
- }
-
- public boolean updateAllTypes() {
- return updateAllTypes;
- }
-
- public void addConflict(String mergeFailure) {
- conflicts.add(mergeFailure);
- }
-
- public boolean hasConflicts() {
- return conflicts.isEmpty() == false;
- }
-
- public String[] buildConflicts() {
- return conflicts.toArray(Strings.EMPTY_ARRAY);
- }
-} \ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java
index fc6d1fa9e1..622c7729dd 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java
@@ -51,8 +51,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
}
public abstract static class Builder<T extends Builder, Y extends MetadataFieldMapper> extends FieldMapper.Builder<T, Y> {
- public Builder(String name, MappedFieldType fieldType) {
- super(name, fieldType);
+ public Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) {
+ super(name, fieldType, defaultFieldType);
}
}
@@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper {
*/
public abstract void postParse(ParseContext context) throws IOException;
+ @Override
+ public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
+ return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes);
+ }
}
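
[Editor's note] The override added here narrows the return type: Java allows a subclass to declare a covariant return, so code holding a MetadataFieldMapper gets a MetadataFieldMapper back from merge without casting at every call site. In miniature:

    // Covariant return types: the subclass override may declare a narrower
    // return, so callers typed to Meta need no cast.
    class Base {
        Base merge(Base mergeWith) { return this; }
    }

    class Meta extends Base {
        @Override
        Meta merge(Base mergeWith) {
            return (Meta) super.merge(mergeWith); // safe when merge preserves the runtime type
        }
    }
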
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
index edf75621c1..0a88e29c8d 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java
@@ -595,7 +595,7 @@ public abstract class ParseContext {
if (dynamicMappingsUpdate == null) {
dynamicMappingsUpdate = mapper;
} else {
- MapperUtils.merge(dynamicMappingsUpdate, mapper);
+ dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false);
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
index ed8314c6f7..aa35e699b2 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java
@@ -128,7 +128,7 @@ public class ParsedDocument {
if (dynamicMappingsUpdate == null) {
dynamicMappingsUpdate = update;
} else {
- MapperUtils.merge(dynamicMappingsUpdate, update);
+ dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false);
}
}
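
[Editor's note] ParseContext and ParsedDocument now accumulate dynamic mapping updates the same way: each new update is folded into the running one with merge(update, false), and the accumulator is replaced by the returned merged copy rather than mutated through MapperUtils. The fold in isolation (String stands in for Mapper here):

    import java.util.List;

    class Accumulate {
        static String fold(List<String> updates) {
            String acc = null;
            for (String update : updates) {
                // merge returns a new value; keep the returned instance.
                acc = (acc == null) ? update : merge(acc, update);
            }
            return acc;
        }

        static String merge(String mergeInto, String mergeWith) {
            return mergeInto + "," + mergeWith; // placeholder for Mapper#merge(update, false)
        }
    }
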
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java
index 7468f4fb2f..0ee311678e 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java
@@ -72,7 +72,7 @@ public class BinaryFieldMapper extends FieldMapper {
public static class Builder extends FieldMapper.Builder<Builder, BinaryFieldMapper> {
public Builder(String name) {
- super(name, Defaults.FIELD_TYPE);
+ super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
builder = this;
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java
index cd76fdbb04..e381bc9c60 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java
@@ -72,7 +72,7 @@ public class BooleanFieldMapper extends FieldMapper {
public static class Builder extends FieldMapper.Builder<Builder, BooleanFieldMapper> {
public Builder(String name) {
- super(name, Defaults.FIELD_TYPE);
+ super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
this.builder = this;
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java
index 61b22a1ee2..44b4cbcd35 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java
@@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
setupFieldType(context);
ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
index 5b4df635a3..69177401db 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java
@@ -356,7 +356,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
* @param name of the completion field to build
*/
public Builder(String name) {
- super(name, new CompletionFieldType());
+ super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
builder = this;
}
@@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
- if (!mergeResult.simulate()) {
- this.maxInputLength = fieldMergeWith.maxInputLength;
- }
+ this.maxInputLength = fieldMergeWith.maxInputLength;
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
index 27b96b27a4..7a99e6b50c 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
@@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper {
fieldType.setNullValue(nullValue);
DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (DateFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
index 0e512bf428..861d33e560 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
@@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
setupFieldType(context);
DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
index 9a607ffd41..ad88c745df 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
@@ -81,8 +81,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
setupFieldType(context);
FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
index 868cfeb438..1899549811 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
@@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType,
ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
index 4130c90258..9d9557c41f 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
@@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper {
setupFieldType(context);
LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType,
ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (LongFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
index 87a63de99e..ed537aa7e5 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
@@ -66,7 +66,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
private Boolean coerce;
public Builder(String name, MappedFieldType fieldType, int defaultPrecisionStep) {
- super(name, fieldType);
+ super(name, fieldType, fieldType);
this.fieldType.setNumericPrecisionStep(defaultPrecisionStep);
}
@@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
}
@Override
- public void includeInAll(Boolean includeInAll) {
+ protected NumberFieldMapper clone() {
+ return (NumberFieldMapper) super.clone();
+ }
+
+ @Override
+ public Mapper includeInAll(Boolean includeInAll) {
if (includeInAll != null) {
- this.includeInAll = includeInAll;
+ NumberFieldMapper clone = clone();
+ clone.includeInAll = includeInAll;
+ return clone;
+ } else {
+ return this;
}
}
@Override
- public void includeInAllIfNotSet(Boolean includeInAll) {
+ public Mapper includeInAllIfNotSet(Boolean includeInAll) {
if (includeInAll != null && this.includeInAll == null) {
- this.includeInAll = includeInAll;
+ NumberFieldMapper clone = clone();
+ clone.includeInAll = includeInAll;
+ return clone;
+ } else {
+ return this;
}
}
@Override
- public void unsetIncludeInAll() {
- includeInAll = null;
+ public Mapper unsetIncludeInAll() {
+ if (includeInAll != null) {
+ NumberFieldMapper clone = clone();
+ clone.includeInAll = null;
+ return clone;
+ } else {
+ return this;
+ }
}
@Override
@@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
- if (!this.getClass().equals(mergeWith.getClass())) {
- return;
- }
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith;
- if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
- this.includeInAll = nfmMergeWith.includeInAll;
- if (nfmMergeWith.ignoreMalformed.explicit()) {
- this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
- }
- if (nfmMergeWith.coerce.explicit()) {
- this.coerce = nfmMergeWith.coerce;
- }
+ this.includeInAll = nfmMergeWith.includeInAll;
+ if (nfmMergeWith.ignoreMalformed.explicit()) {
+ this.ignoreMalformed = nfmMergeWith.ignoreMalformed;
+ }
+ if (nfmMergeWith.coerce.explicit()) {
+ this.coerce = nfmMergeWith.coerce;
}
}
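
[Editor's note] The includeInAll family on NumberFieldMapper is now copy-on-write: instead of mutating the mapper, each setter clones it, flips the flag on the clone, and returns it, or returns this when there is nothing to change. This is also why the numeric builders above cast the result of fieldMapper.includeInAll(...) back to their concrete type. The setter shape in a self-contained form (hypothetical class):

    // Clone-on-write setter: callers must use the returned instance,
    // e.g. mapper = mapper.includeInAll(Boolean.TRUE);
    class CloneSetter implements Cloneable {
        private Boolean includeInAll;

        @Override
        protected CloneSetter clone() {
            try {
                return (CloneSetter) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e); // cannot happen: Cloneable is implemented
            }
        }

        CloneSetter includeInAll(Boolean includeInAll) {
            if (includeInAll != null) {
                CloneSetter copy = clone();
                copy.includeInAll = includeInAll;
                return copy;
            }
            return this; // no change requested, avoid the allocation
        }
    }
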
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
index 81ed6cc3ba..e455959c53 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
@@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType,
ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
index 0a921ad85e..08582c6599 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
@@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
@@ -99,7 +98,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
protected int ignoreAbove = Defaults.IGNORE_ABOVE;
public Builder(String name) {
- super(name, Defaults.FIELD_TYPE);
+ super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
builder = this;
}
@@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
StringFieldMapper fieldMapper = new StringFieldMapper(
name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove,
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return fieldMapper.includeInAll(includeInAll);
}
}
@@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
}
@Override
- public void includeInAll(Boolean includeInAll) {
+ protected StringFieldMapper clone() {
+ return (StringFieldMapper) super.clone();
+ }
+
+ @Override
+ public StringFieldMapper includeInAll(Boolean includeInAll) {
if (includeInAll != null) {
- this.includeInAll = includeInAll;
+ StringFieldMapper clone = clone();
+ clone.includeInAll = includeInAll;
+ return clone;
+ } else {
+ return this;
}
}
@Override
- public void includeInAllIfNotSet(Boolean includeInAll) {
+ public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) {
if (includeInAll != null && this.includeInAll == null) {
- this.includeInAll = includeInAll;
+ StringFieldMapper clone = clone();
+ clone.includeInAll = includeInAll;
+ return clone;
+ } else {
+ return this;
}
}
@Override
- public void unsetIncludeInAll() {
- includeInAll = null;
+ public StringFieldMapper unsetIncludeInAll() {
+ if (includeInAll != null) {
+ StringFieldMapper clone = clone();
+ clone.includeInAll = null;
+ return clone;
+ } else {
+ return this;
+ }
}
@Override
@@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
- if (!this.getClass().equals(mergeWith.getClass())) {
- return;
- }
- if (!mergeResult.simulate()) {
- this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
- this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
- }
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
+ this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
+ this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;
}
@Override
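
[Editor's note] Across the field mappers, the old merge(Mapper, MergeResult) overrides become doMerge(Mapper, boolean) hooks: the shared checks (class compatibility, field-type conflicts) live in one place, and subclasses only extend the protected hook. Assuming the base merge clones the receiver before applying doMerge, as the immutable design implies, the split looks roughly like this (hypothetical class):

    class SketchMapper implements Cloneable {
        protected String analyzer = "default";

        @Override
        protected SketchMapper clone() {
            try {
                return (SketchMapper) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e);
            }
        }

        // Shared entry point: checks once, then applies doMerge to a clone
        // so the original mapper is never mutated.
        public SketchMapper merge(SketchMapper mergeWith, boolean updateAllTypes) {
            if (getClass() != mergeWith.getClass()) {
                throw new IllegalArgumentException("mapper of different type");
            }
            SketchMapper merged = clone();
            merged.doMerge(mergeWith, updateAllTypes);
            return merged;
        }

        // Subclasses override this hook and call super.doMerge first.
        protected void doMerge(SketchMapper mergeWith, boolean updateAllTypes) {
            this.analyzer = mergeWith.analyzer;
        }
    }
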
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
index 8348892e44..a485c3727f 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
@@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
@@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(name, fieldType, defaultFieldType,
ignoreMalformed(context), coerce(context), context.indexSettings(),
analyzer, multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
@@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
- if (!this.getClass().equals(mergeWith.getClass())) {
- return;
- }
- if (!mergeResult.simulate()) {
- this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
- }
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
+ this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
index e530243657..f6bd4946eb 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
@@ -61,7 +61,6 @@ public class TypeParsers {
@Override
public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
- ContentPath.Type pathType = null;
FieldMapper.Builder mainFieldBuilder = null;
List<FieldMapper.Builder> fields = null;
String firstType = null;
@@ -70,10 +69,7 @@ public class TypeParsers {
Map.Entry<String, Object> entry = iterator.next();
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
- if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
- pathType = parsePathType(name, fieldNode.toString());
- iterator.remove();
- } else if (fieldName.equals("fields")) {
+ if (fieldName.equals("fields")) {
Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode;
for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) {
Map.Entry<String, Object> entry1 = fieldsIterator.next();
@@ -132,17 +128,10 @@ public class TypeParsers {
}
}
- if (fields != null && pathType != null) {
+ if (fields != null) {
for (Mapper.Builder field : fields) {
mainFieldBuilder.addMultiField(field);
}
- mainFieldBuilder.multiFieldPathType(pathType);
- } else if (fields != null) {
- for (Mapper.Builder field : fields) {
- mainFieldBuilder.addMultiField(field);
- }
- } else if (pathType != null) {
- mainFieldBuilder.multiFieldPathType(pathType);
}
return mainFieldBuilder;
}
@@ -337,10 +326,7 @@ public class TypeParsers {
public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
parserContext = parserContext.createMultiFieldContext(parserContext);
- if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
- builder.multiFieldPathType(parsePathType(name, propNode.toString()));
- return true;
- } else if (propName.equals("fields")) {
+ if (propName.equals("fields")) {
final Map<String, Object> multiFieldsPropNodes;
@@ -457,17 +443,6 @@ public class TypeParsers {
}
}
- public static ContentPath.Type parsePathType(String name, String path) throws MapperParsingException {
- path = Strings.toUnderscoreCase(path);
- if ("just_name".equals(path)) {
- return ContentPath.Type.JUST_NAME;
- } else if ("full".equals(path)) {
- return ContentPath.Type.FULL;
- } else {
- throw new MapperParsingException("wrong value for pathType [" + path + "] for object [" + name + "]");
- }
- }
-
@SuppressWarnings("unchecked")
public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) {
FieldMapper.CopyTo.Builder copyToBuilder = new FieldMapper.CopyTo.Builder();
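
[Editor's note] TypeParsers walks the mapping node with an explicit iterator and removes each recognized entry, so whatever survives the loop can later be rejected as an unknown parameter; dropping the legacy path handling simply removes one branch from that loop. The consume-and-remove idiom on its own (hypothetical parser):

    import java.util.Iterator;
    import java.util.Map;

    class NodeParser {
        static void parse(Map<String, Object> node) {
            for (Iterator<Map.Entry<String, Object>> it = node.entrySet().iterator(); it.hasNext();) {
                Map.Entry<String, Object> entry = it.next();
                if ("fields".equals(entry.getKey())) {
                    // ... handle multi fields ...
                    it.remove(); // mark the entry as consumed
                }
            }
            if (node.isEmpty() == false) {
                throw new IllegalArgumentException("unsupported parameters: " + node.keySet());
            }
        }
    }
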
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java
index 0b57d866dd..0bbe2fe8f1 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java
@@ -33,12 +33,10 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper;
@@ -74,7 +72,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
public static class Defaults {
- public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
public static final boolean ENABLE_LATLON = false;
public static final boolean ENABLE_GEOHASH = false;
public static final boolean ENABLE_GEOHASH_PREFIX = false;
@@ -83,7 +80,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
public abstract static class Builder<T extends Builder, Y extends BaseGeoPointFieldMapper> extends FieldMapper.Builder<T, Y> {
- protected ContentPath.Type pathType = Defaults.PATH_TYPE;
protected boolean enableLatLon = Defaults.ENABLE_LATLON;
@@ -98,7 +94,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
protected Boolean ignoreMalformed;
public Builder(String name, GeoPointFieldType fieldType) {
- super(name, fieldType);
+ super(name, fieldType, fieldType);
}
@Override
@@ -107,12 +103,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
@Override
- public T multiFieldPathType(ContentPath.Type pathType) {
- this.pathType = pathType;
- return builder;
- }
-
- @Override
public T fieldDataSettings(Settings settings) {
this.fieldDataSettings = settings;
return builder;
@@ -159,13 +149,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType,
- Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
+ Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo);
public Y build(Mapper.BuilderContext context) {
- ContentPath.Type origPathType = context.path().pathType();
- context.path().pathType(pathType);
-
GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType;
DoubleFieldMapper latMapper = null;
@@ -191,9 +178,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix);
}
context.path().remove();
- context.path().pathType(origPathType);
- return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType,
+ return build(context, name, fieldType, defaultFieldType, context.indexSettings(),
latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo);
}
}
@@ -365,17 +351,14 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
protected final DoubleFieldMapper lonMapper;
- protected final ContentPath.Type pathType;
-
protected final StringFieldMapper geoHashMapper;
protected Explicit<Boolean> ignoreMalformed;
protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
- ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
+ DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper,
MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
- this.pathType = pathType;
this.latMapper = latMapper;
this.lonMapper = lonMapper;
this.geoHashMapper = geoHashMapper;
@@ -388,17 +371,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
- if (!this.getClass().equals(mergeWith.getClass())) {
- return;
- }
-
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith;
- if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
- if (gpfmMergeWith.ignoreMalformed.explicit()) {
- this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
- }
+ if (gpfmMergeWith.ignoreMalformed.explicit()) {
+ this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;
}
}
@@ -441,8 +418,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
public Mapper parse(ParseContext context) throws IOException {
- ContentPath.Type origPathType = context.path().pathType();
- context.path().pathType(pathType);
context.path().add(simpleName());
GeoPoint sparse = context.parseExternalValue(GeoPoint.class);
@@ -487,7 +462,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
}
context.path().remove();
- context.path().pathType(origPathType);
return null;
}
@@ -512,9 +486,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
- if (includeDefaults || pathType != Defaults.PATH_TYPE) {
- builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
- }
if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
builder.field("lat_lon", fieldType().isLatLonEnabled());
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
index 286aca2972..fa61669e80 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
@@ -27,7 +27,6 @@ import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
@@ -81,12 +80,12 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
@Override
public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType,
- MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper,
+ MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
setupFieldType(context);
- return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper,
+ return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
geoHashMapper, multiFields, ignoreMalformed, copyTo);
}
@@ -104,9 +103,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
}
public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
- ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
+ DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields,
+ super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java
index 84e6bde07a..735baa8853 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java
@@ -35,11 +35,9 @@ import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.util.ByteUtils;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
-import org.elasticsearch.index.mapper.ContentPath;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField;
@@ -111,14 +109,14 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
@Override
public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType,
- MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper,
+ MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper,
DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
CopyTo copyTo) {
fieldType.setTokenized(false);
setupFieldType(context);
fieldType.setHasDocValues(false);
defaultFieldType.setHasDocValues(false);
- return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper,
+ return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,
geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo);
}
@@ -288,32 +286,27 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement
protected Explicit<Boolean> coerce;
public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings,
- ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
+ DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper,
StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce, CopyTo copyTo) {
- super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields,
+ super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields,
ignoreMalformed, copyTo);
this.coerce = coerce;
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
- if (!this.getClass().equals(mergeWith.getClass())) {
- return;
- }
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith;
if (gpfmMergeWith.coerce.explicit()) {
if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) {
- mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
+ throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]");
}
}
- if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
- if (gpfmMergeWith.coerce.explicit()) {
- this.coerce = gpfmMergeWith.coerce;
- }
+ if (gpfmMergeWith.coerce.explicit()) {
+ this.coerce = gpfmMergeWith.coerce;
}
}
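
[Editor's note] The coerce conflict check survives the MergeResult removal but now throws directly. The Explicit wrapper marks whether a value was set by the user or defaulted; only two explicitly configured, disagreeing values count as a conflict, and an explicit incoming value wins otherwise. Sketched with minimal stand-in types:

    final class Explicit<T> {
        final T value;
        final boolean explicit;
        Explicit(T value, boolean explicit) { this.value = value; this.explicit = explicit; }
    }

    class CoerceMerge {
        Explicit<Boolean> coerce = new Explicit<>(false, false); // default, not explicit

        void doMerge(CoerceMerge mergeWith) {
            if (mergeWith.coerce.explicit) {
                if (coerce.explicit && coerce.value.equals(mergeWith.coerce.value) == false) {
                    throw new IllegalArgumentException("mapper has different [coerce]");
                }
                coerce = mergeWith.coerce; // adopt the explicit setting
            }
        }
    }
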
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
index 7e784324f3..1ba49e64d8 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
@@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.IOException;
@@ -121,7 +120,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
private Boolean coerce;
public Builder(String name) {
- super(name, Defaults.FIELD_TYPE);
+ super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
@Override
@@ -185,7 +184,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
builder.fieldType().setDistanceErrorPct(Double.parseDouble(fieldNode.toString()));
iterator.remove();
} else if (Names.ORIENTATION.equals(fieldName)) {
- builder.fieldType().setOrientation(ShapeBuilder.orientationFromString(fieldNode.toString()));
+ builder.fieldType().setOrientation(ShapeBuilder.Orientation.fromString(fieldNode.toString()));
iterator.remove();
} else if (Names.STRATEGY.equals(fieldName)) {
builder.fieldType().setStrategyName(fieldNode.toString());
@@ -193,7 +192,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
} else if (Names.COERCE.equals(fieldName)) {
builder.coerce(nodeBooleanValue(fieldNode));
iterator.remove();
- } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) {
+ } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)
+ && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) {
builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode));
iterator.remove();
}
@@ -284,6 +284,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName());
termStrategy.setDistErrPct(distanceErrorPct());
defaultStrategy = resolveStrategy(strategyName);
+ defaultStrategy.setPointsOnly(pointsOnly);
}
@Override
@@ -347,6 +348,9 @@ public class GeoShapeFieldMapper extends FieldMapper {
public void setStrategyName(String strategyName) {
checkIfFrozen();
this.strategyName = strategyName;
+ if (this.strategyName.equals(SpatialStrategy.TERM.getStrategyName())) {
+ this.pointsOnly = true;
+ }
}
public boolean pointsOnly() {
@@ -406,7 +410,6 @@ public class GeoShapeFieldMapper extends FieldMapper {
public PrefixTreeStrategy resolveStrategy(String strategyName) {
if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) {
- recursiveStrategy.setPointsOnly(pointsOnly());
return recursiveStrategy;
}
if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) {
@@ -446,7 +449,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
shape = shapeBuilder.build();
}
- if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) {
+ if (fieldType().pointsOnly() && !(shape instanceof Point)) {
throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " +
((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found");
}
@@ -471,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
- if (!this.getClass().equals(mergeWith.getClass())) {
- return;
- }
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith;
- if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
- if (gsfm.coerce.explicit()) {
- this.coerce = gsfm.coerce;
- }
+ if (gsfm.coerce.explicit()) {
+ this.coerce = gsfm.coerce;
}
}
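
[Editor's note] Two related invariants are tightened here: selecting the term strategy pins points_only to true (the term strategy can only index points), and the parser ignores an explicit points_only when the term strategy is in effect. Note that strategyName is a String, so the comparison must be against SpatialStrategy.TERM.getStrategyName(), never the enum constant itself, which a String can never equal. The invariant in isolation (hypothetical class; "term" stands in for the strategy name):

    class ShapeSettings {
        static final String TERM = "term"; // stands in for SpatialStrategy.TERM.getStrategyName()

        String strategyName = "recursive";
        boolean pointsOnly = false;

        void setStrategyName(String strategyName) {
            this.strategyName = strategyName;
            if (TERM.equals(this.strategyName)) { // compare name strings, not the enum constant
                this.pointsOnly = true;
            }
        }

        void setPointsOnly(boolean pointsOnly) {
            if (TERM.equals(strategyName)) {
                return; // pinned: the term strategy always indexes points only
            }
            this.pointsOnly = pointsOnly;
        }
    }
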
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
index 645c36a485..bcd094d2ae 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
@@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
@@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper {
public interface IncludeInAll {
- void includeInAll(Boolean includeInAll);
-
- void includeInAllIfNotSet(Boolean includeInAll);
-
- void unsetIncludeInAll();
+ /**
+ * If {@code includeInAll} is not null then return a copy of this mapper
+ * that will include values in the _all field according to {@code includeInAll}.
+ */
+ Mapper includeInAll(Boolean includeInAll);
+
+ /**
+ * If {@code includeInAll} is not null and not set on this mapper yet, then
+ * return a copy of this mapper that will include values in the _all field
+ * according to {@code includeInAll}.
+ */
+ Mapper includeInAllIfNotSet(Boolean includeInAll);
+
+ /**
+ * If {@code includeInAll} was already set on this mapper then return a copy
+ * of this mapper that has {@code includeInAll} not set.
+ */
+ Mapper unsetIncludeInAll();
}
public static final String NAME = "_all";
@@ -89,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
private EnabledAttributeMapper enabled = Defaults.ENABLED;
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
builder = this;
indexName = Defaults.INDEX_NAME;
}
@@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
- mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
+ throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
}
- super.merge(mergeWith, mergeResult);
+ super.doMerge(mergeWith, updateAllTypes);
}
@Override
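The IncludeInAll interface change above sets the template for the rest of this patch: all three methods now return a (possibly new) Mapper instead of mutating the receiver, so mapper instances can be treated as immutable, and callers (see the IpFieldMapper hunk further down) must consume the returned copy. A self-contained sketch of the copy-on-write idiom the new javadocs describe, with illustrative names:

    // Copy-on-write sketch: setters return a modified clone, or this on a no-op.
    final class IncludeInAllSketch implements Cloneable {
        private Boolean includeInAll;

        IncludeInAllSketch includeInAll(Boolean value) {
            if (value == null || value.equals(includeInAll)) {
                return this; // nothing to change, keep sharing this instance
            }
            IncludeInAllSketch copy = clone();
            copy.includeInAll = value;
            return copy;
        }

        IncludeInAllSketch includeInAllIfNotSet(Boolean value) {
            return includeInAll == null ? includeInAll(value) : this;
        }

        IncludeInAllSketch unsetIncludeInAll() {
            if (includeInAll == null) {
                return this;
            }
            IncludeInAllSketch copy = clone();
            copy.includeInAll = null;
            return copy;
        }

        @Override
        protected IncludeInAllSketch clone() {
            try {
                return (IncludeInAllSketch) super.clone();
            } catch (CloneNotSupportedException e) {
                throw new AssertionError(e);
            }
        }
    }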
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java
index 7883415e59..e03439f3f5 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java
@@ -78,7 +78,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
private boolean enabled = Defaults.ENABLED;
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}
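Every metadata field Builder in this patch gets the same two-line treatment as FieldNamesFieldMapper above: besides the starting field type (the existing mapping when present, otherwise the defaults), the super constructor now also receives the pristine defaults, so later code can tell which settings were changed explicitly. A hypothetical sketch of the widened constructor; the real signature lives on FieldMapper.Builder, outside this diff:

    // Hypothetical stand-ins; MappedFieldType is modeled as an opaque class.
    class FieldTypeSketch { }

    abstract class MetadataBuilderSketch {
        final FieldTypeSketch fieldType;        // working copy: existing mapping or defaults
        final FieldTypeSketch defaultFieldType; // untouched defaults, for "was this changed?" checks

        MetadataBuilderSketch(String name, FieldTypeSketch existingOrDefault, FieldTypeSketch defaults) {
            this.fieldType = existingOrDefault;
            this.defaultFieldType = defaults;
        }
    }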
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
index 16b6c4c56d..0fe3e10bcb 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java
@@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
@@ -90,7 +89,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
private String path = Defaults.PATH;
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}
@@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
index 962332b5c4..dbbf03b72e 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java
@@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.query.QueryShardContext;
@@ -80,7 +79,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}
@@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith;
- if (!mergeResult.simulate()) {
- if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
- this.enabledState = indexFieldMapperMergeWith.enabledState;
- }
+ if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) {
+ this.enabledState = indexFieldMapperMergeWith.enabledState;
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
index 760259a180..65daef2a83 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java
@@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
@@ -98,7 +97,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
private final MappedFieldType childJoinFieldType = Defaults.JOIN_FIELD_TYPE.clone();
public Builder(String documentType) {
- super(Defaults.NAME, Defaults.FIELD_TYPE);
+ super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
this.indexName = name;
this.documentType = documentType;
builder = this;
@@ -371,11 +370,11 @@ public class ParentFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
- super.merge(mergeWith, mergeResult);
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
if (Objects.equals(parentType, fieldMergeWith.parentType) == false) {
- mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
+ throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
}
List<String> conflicts = new ArrayList<>();
@@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper {
parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here
if (childJoinFieldType != null) {
// TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type.
- childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false);
+ childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false);
}
- for (String conflict : conflicts) {
- mergeResult.addConflict(conflict);
+ if (conflicts.isEmpty() == false) {
+ throw new IllegalArgumentException("Merge conflicts: " + conflicts);
}
- if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {
+ if (active()) {
childJoinFieldType = fieldMergeWith.childJoinFieldType.clone();
}
}
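The ParentFieldMapper hunk shows the recurring merge rewrite in full: instead of accumulating problems on a MergeResult and honoring a simulate flag, doMerge checks hard failures immediately, collects the remaining conflicts into a local list, and throws once, so a failed merge leaves the mapper untouched and reports every incompatibility together. A reduced sketch of that pattern, assuming a hard check followed by collected soft checks:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Objects;

    final class ConflictMergeSketch {
        static void merge(String currentType, String incomingType, List<String> compatibilityIssues) {
            // Hard failure: this option can never change.
            if (Objects.equals(currentType, incomingType) == false) {
                throw new IllegalArgumentException("The _parent field's type option can't be changed: ["
                        + currentType + "]->[" + incomingType + "]");
            }
            // Soft checks: collect everything, then fail once with the full list.
            List<String> conflicts = new ArrayList<>(compatibilityIssues);
            if (conflicts.isEmpty() == false) {
                throw new IllegalArgumentException("Merge conflicts: " + conflicts);
            }
        }
    }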
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
index 18d0645d2d..40b7e6871c 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java
@@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
@@ -78,7 +77,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
private String path = Defaults.PATH;
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
}
public Builder required(boolean required) {
@@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
index da3b8dbc5a..40bf9eb0c8 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java
@@ -29,13 +29,10 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.compress.Compressor;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
@@ -44,21 +41,17 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
-import java.io.BufferedInputStream;
import java.io.IOException;
-import java.io.InputStream;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
/**
*
@@ -72,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
public static class Defaults {
public static final String NAME = SourceFieldMapper.NAME;
public static final boolean ENABLED = true;
- public static final long COMPRESS_THRESHOLD = -1;
- public static final String FORMAT = null; // default format is to use the one provided
public static final MappedFieldType FIELD_TYPE = new SourceFieldType();
@@ -93,17 +84,11 @@ public class SourceFieldMapper extends MetadataFieldMapper {
private boolean enabled = Defaults.ENABLED;
- private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
-
- private Boolean compress = null;
-
- private String format = Defaults.FORMAT;
-
private String[] includes = null;
private String[] excludes = null;
public Builder() {
- super(Defaults.NAME, Defaults.FIELD_TYPE);
+ super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
public Builder enabled(boolean enabled) {
@@ -111,21 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
return this;
}
- public Builder compress(boolean compress) {
- this.compress = compress;
- return this;
- }
-
- public Builder compressThreshold(long compressThreshold) {
- this.compressThreshold = compressThreshold;
- return this;
- }
-
- public Builder format(String format) {
- this.format = format;
- return this;
- }
-
public Builder includes(String[] includes) {
this.includes = includes;
return this;
@@ -138,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
@Override
public SourceFieldMapper build(BuilderContext context) {
- return new SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings());
+ return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings());
}
}
@@ -154,24 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
if (fieldName.equals("enabled")) {
builder.enabled(nodeBooleanValue(fieldNode));
iterator.remove();
- } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
- if (fieldNode != null) {
- builder.compress(nodeBooleanValue(fieldNode));
- }
- iterator.remove();
- } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
- if (fieldNode != null) {
- if (fieldNode instanceof Number) {
- builder.compressThreshold(((Number) fieldNode).longValue());
- builder.compress(true);
- } else {
- builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes());
- builder.compress(true);
- }
- }
- iterator.remove();
- } else if ("format".equals(fieldName)) {
- builder.format(nodeStringValue(fieldNode, null));
+ } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) {
+ // ignore on old indices, reject on and after 3.0
iterator.remove();
} else if (fieldName.equals("includes")) {
List<Object> values = (List<Object>) fieldNode;
@@ -242,30 +196,18 @@ public class SourceFieldMapper extends MetadataFieldMapper {
/** indicates whether the source will always exist and be complete, for use by features like the update API */
private final boolean complete;
- private Boolean compress;
- private long compressThreshold;
-
private final String[] includes;
private final String[] excludes;
- private String format;
-
- private XContentType formatContentType;
-
private SourceFieldMapper(Settings indexSettings) {
- this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings);
+ this(Defaults.ENABLED, null, null, indexSettings);
}
- private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold,
- String[] includes, String[] excludes, Settings indexSettings) {
+ private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) {
super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored.
this.enabled = enabled;
- this.compress = compress;
- this.compressThreshold = compressThreshold;
this.includes = includes;
this.excludes = excludes;
- this.format = format;
- this.formatContentType = format == null ? null : XContentType.fromRestContentType(format);
this.complete = enabled && includes == null && excludes == null;
}
@@ -321,71 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper {
Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);
BytesStreamOutput bStream = new BytesStreamOutput();
- StreamOutput streamOutput = bStream;
- if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) {
- streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
- }
- XContentType contentType = formatContentType;
- if (contentType == null) {
- contentType = mapTuple.v1();
- }
- XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource);
+ XContentType contentType = mapTuple.v1();
+ XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource);
builder.close();
source = bStream.bytes();
- } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) {
- if (compressThreshold == -1 || source.length() > compressThreshold) {
- BytesStreamOutput bStream = new BytesStreamOutput();
- XContentType contentType = XContentFactory.xContentType(source);
- if (formatContentType != null && formatContentType != contentType) {
- XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream));
- builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
- builder.close();
- } else {
- StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
- source.writeTo(streamOutput);
- streamOutput.close();
- }
- source = bStream.bytes();
- // update the data in the context, so it can be compressed and stored compressed outside...
- context.source(source);
- }
- } else if (formatContentType != null) {
- // see if we need to convert the content type
- Compressor compressor = CompressorFactory.compressor(source);
- if (compressor != null) {
- InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
- if (compressedStreamInput.markSupported() == false) {
- compressedStreamInput = new BufferedInputStream(compressedStreamInput);
- }
- XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
- if (contentType != formatContentType) {
- // we need to reread and store back, compressed....
- BytesStreamOutput bStream = new BytesStreamOutput();
- StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
- XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput);
- builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput));
- builder.close();
- source = bStream.bytes();
- // update the data in the context, so we store it in the translog in this format
- context.source(source);
- } else {
- compressedStreamInput.close();
- }
- } else {
- XContentType contentType = XContentFactory.xContentType(source);
- if (contentType != formatContentType) {
- // we need to reread and store back
- // we need to reread and store back, compressed....
- BytesStreamOutput bStream = new BytesStreamOutput();
- XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream);
- builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
- builder.close();
- source = bStream.bytes();
- // update the data in the context, so we store it in the translog in this format
- context.source(source);
- }
- }
}
if (!source.hasArray()) {
source = source.toBytesArray();
@@ -403,26 +285,13 @@ public class SourceFieldMapper extends MetadataFieldMapper {
boolean includeDefaults = params.paramAsBoolean("include_defaults", false);
// all are defaults, no need to write it at all
- if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) {
+ if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) {
return builder;
}
builder.startObject(contentType());
if (includeDefaults || enabled != Defaults.ENABLED) {
builder.field("enabled", enabled);
}
- if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) {
- builder.field("format", format);
- }
- if (compress != null) {
- builder.field("compress", compress);
- } else if (includeDefaults) {
- builder.field("compress", false);
- }
- if (compressThreshold != -1) {
- builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
- } else if (includeDefaults) {
- builder.field("compress_threshold", -1);
- }
if (includes != null) {
builder.field("includes", includes);
@@ -441,25 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
- if (mergeResult.simulate()) {
- if (this.enabled != sourceMergeWith.enabled) {
- mergeResult.addConflict("Cannot update enabled setting for [_source]");
- }
- if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
- mergeResult.addConflict("Cannot update includes setting for [_source]");
- }
- if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
- mergeResult.addConflict("Cannot update excludes setting for [_source]");
- }
- } else {
- if (sourceMergeWith.compress != null) {
- this.compress = sourceMergeWith.compress;
- }
- if (sourceMergeWith.compressThreshold != -1) {
- this.compressThreshold = sourceMergeWith.compressThreshold;
- }
+ List<String> conflicts = new ArrayList<>();
+ if (this.enabled != sourceMergeWith.enabled) {
+ conflicts.add("Cannot update enabled setting for [_source]");
+ }
+ if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) {
+ conflicts.add("Cannot update includes setting for [_source]");
+ }
+ if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
+ conflicts.add("Cannot update excludes setting for [_source]");
+ }
+ if (conflicts.isEmpty() == false) {
+ throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts);
}
}
}
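The SourceFieldMapper diff deletes the compress, compress_threshold and format options wholesale: _source is now only rewritten when include/exclude filters apply, and in that case it is re-serialized in whatever content type it arrived in, never transcoded; on-disk compression is left to Lucene's stored-fields compression. What survives of the write path boils down to filter-then-reserialize, sketched here with plain top-level map filtering rather than the real XContentMapValues wildcard support:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Set;

    final class SourceFilterSketch {
        // Keep only the listed top-level keys; the real filter also handles
        // nested paths, wildcard patterns, and excludes.
        static Map<String, Object> filter(Map<String, Object> source, Set<String> includes) {
            Map<String, Object> filtered = new LinkedHashMap<>();
            for (Map.Entry<String, Object> entry : source.entrySet()) {
                if (includes.contains(entry.getKey())) {
                    filtered.put(entry.getKey(), entry.getValue());
                }
            }
            return filtered; // re-serialized with the original XContentType
        }
    }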
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
index 9a18befe62..f99ca18600 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java
@@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.SourceToParse;
@@ -79,7 +78,7 @@ public class TTLFieldMapper extends MetadataFieldMapper {
private long defaultTTL = Defaults.DEFAULT;
public Builder() {
- super(Defaults.NAME, Defaults.TTL_FIELD_TYPE);
+ super(Defaults.NAME, Defaults.TTL_FIELD_TYPE, Defaults.FIELD_TYPE);
}
public Builder enabled(EnabledAttributeMapper enabled) {
@@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith;
- if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with
- if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) {
- mergeResult.addConflict("_ttl cannot be disabled once it was enabled.");
+ if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) { // only act if the mapper we merge with actually set an enabled state
+ if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == EnabledAttributeMapper.DISABLED) {
+ throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled.");
} else {
- if (!mergeResult.simulate()) {
- this.enabledState = ttlMergeWith.enabledState;
- }
+ this.enabledState = ttlMergeWith.enabledState;
}
}
if (ttlMergeWith.defaultTTL != -1) {
// we never build the default when the field is disabled so we should also not set it
// (it does not make a difference though, as everything that is not built in toXContent will also not be set in the cluster)
- if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) {
+ if (enabledState == EnabledAttributeMapper.ENABLED) {
this.defaultTTL = ttlMergeWith.defaultTTL;
}
}
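_ttl keeps its one-way semantics; only the failure mode changes, from a recorded conflict to an exception: enabling sticks, and an attempt to disable an enabled field now throws. A compact model of that toggle:

    // One-way toggle sketch: UNSET may become anything, ENABLED may not become DISABLED.
    final class TtlToggleSketch {
        enum State { UNSET, ENABLED, DISABLED }

        private State state = State.UNSET;

        void merge(State incoming) {
            if (incoming == State.UNSET) {
                return; // the mapper we merge with set nothing
            }
            if (state == State.ENABLED && incoming == State.DISABLED) {
                throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled.");
            }
            state = incoming;
        }
    }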
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
index 468243d63c..b0606f1994 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java
@@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.core.LongFieldMapper;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -96,7 +96,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
private Boolean ignoreMissing = null;
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
if (existing != null) {
// if there is an existing type, always use that store value (only matters for < 2.0)
explicitStore = true;
@@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith;
- super.merge(mergeWith, mergeResult);
- if (!mergeResult.simulate()) {
- if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
- this.enabledState = timestampFieldMapperMergeWith.enabledState;
- }
- } else {
- if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
- return;
- }
- if (defaultTimestamp == null) {
- mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
- } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
- mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
- } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
- mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
- }
- if (this.path != null) {
- if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
- mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
- }
- } else if (timestampFieldMapperMergeWith.path() != null) {
- mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
+ super.doMerge(mergeWith, updateAllTypes);
+ if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) {
+ this.enabledState = timestampFieldMapperMergeWith.enabledState;
+ }
+ if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) {
+ return;
+ }
+ List<String> conflicts = new ArrayList<>();
+ if (defaultTimestamp == null) {
+ conflicts.add("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
+ } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) {
+ conflicts.add("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null");
+ } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) {
+ conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp());
+ }
+ if (this.path != null) {
+ if (path.equals(timestampFieldMapperMergeWith.path()) == false) {
+ conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path()));
}
+ } else if (timestampFieldMapperMergeWith.path() != null) {
+ conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing");
+ }
+ if (conflicts.isEmpty() == false) {
+ throw new IllegalArgumentException("Conflicts: " + conflicts);
}
}
}
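_timestamp now treats both its default value and its path as immutable once set: the rewritten doMerge gathers every mismatch (null versus set, or set versus a different value) into a list and throws once, reporting all problems together instead of only the first. A sketch of that collect-all shape, with the exact message wording elided:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Objects;

    final class TimestampMergeSketch {
        static void merge(String currentDefault, String incomingDefault,
                          String currentPath, String incomingPath) {
            List<String> conflicts = new ArrayList<>();
            if (Objects.equals(currentDefault, incomingDefault) == false) {
                conflicts.add("Cannot update default in _timestamp value");
            }
            if (Objects.equals(currentPath, incomingPath) == false) {
                conflicts.add("Cannot update path in _timestamp value");
            }
            if (conflicts.isEmpty() == false) {
                throw new IllegalArgumentException("Conflicts: " + conflicts);
            }
        }
    }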
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
index d4acc3c597..c529db5183 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java
@@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.Uid;
@@ -81,7 +80,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
public static class Builder extends MetadataFieldMapper.Builder<Builder, TypeFieldMapper> {
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}
@@ -225,7 +224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
index ef4c48e62e..10f9880d97 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java
@@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
@@ -79,7 +78,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
public static class Builder extends MetadataFieldMapper.Builder<Builder, UidFieldMapper> {
public Builder(MappedFieldType existing) {
- super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing);
+ super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE);
indexName = Defaults.NAME;
}
@@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// do nothing here, no merging, but also no exception
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
index 292a622ab7..6b1471afda 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java
@@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParseContext.Document;
@@ -62,7 +61,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
public static class Builder extends MetadataFieldMapper.Builder<Builder, VersionFieldMapper> {
public Builder() {
- super(Defaults.NAME, Defaults.FIELD_TYPE);
+ super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE);
}
@Override
@@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper {
}
@Override
- public void merge(Mapper mergeWith, MergeResult mergeResult) {
+ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
// nothing to do
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java
index e57ceaf8ca..d8a7c752e6 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java
@@ -122,8 +122,7 @@ public class IpFieldMapper extends NumberFieldMapper {
setupFieldType(context);
IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context),
context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
- fieldMapper.includeInAll(includeInAll);
- return fieldMapper;
+ return (IpFieldMapper) fieldMapper.includeInAll(includeInAll);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java
index c51264f3db..58602f06df 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java
@@ -125,13 +125,13 @@ public class DynamicTemplate {
}
public boolean match(ContentPath path, String name, String dynamicType) {
- if (pathMatch != null && !patternMatch(pathMatch, path.fullPathAsText(name))) {
+ if (pathMatch != null && !patternMatch(pathMatch, path.pathAsText(name))) {
return false;
}
if (match != null && !patternMatch(match, name)) {
return false;
}
- if (pathUnmatch != null && patternMatch(pathUnmatch, path.fullPathAsText(name))) {
+ if (pathUnmatch != null && patternMatch(pathUnmatch, path.pathAsText(name))) {
return false;
}
if (unmatch != null && patternMatch(unmatch, name)) {
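With the legacy path: just_name option gone, a field has exactly one textual path, hence the rename from fullPathAsText to pathAsText; template matching itself is unchanged: path patterns test the full dotted path, name patterns test the leaf name, and the unmatch variants negate. A simplified sketch of that ordering (the real patternMatch implements simple * wildcards; the regex translation below is only for the sketch):

    final class TemplateMatchSketch {
        static boolean matches(String fullPath, String leafName,
                               String pathMatch, String pathUnmatch,
                               String match, String unmatch) {
            if (pathMatch != null && !wildcardMatch(pathMatch, fullPath)) return false;
            if (match != null && !wildcardMatch(match, leafName)) return false;
            if (pathUnmatch != null && wildcardMatch(pathUnmatch, fullPath)) return false;
            if (unmatch != null && wildcardMatch(unmatch, leafName)) return false;
            return true;
        }

        // Minimal * wildcard: escape dots, then expand stars into a regex.
        static boolean wildcardMatch(String pattern, String value) {
            return value.matches(pattern.replace(".", "\\.").replace("*", ".*"));
        }
    }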
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
index 88f8971905..c2d9783fc9 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java
@@ -24,7 +24,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
@@ -40,7 +39,6 @@ import java.util.*;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.index.mapper.MapperBuilders.object;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
/**
*
@@ -54,7 +52,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
public static final boolean ENABLED = true;
public static final Nested NESTED = Nested.NO;
public static final Dynamic DYNAMIC = null; // not set, inherited from root
- public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
}
public static enum Dynamic {
@@ -104,8 +101,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
protected Dynamic dynamic = Defaults.DYNAMIC;
- protected ContentPath.Type pathType = Defaults.PATH_TYPE;
-
protected Boolean includeInAll;
protected final List<Mapper.Builder> mappersBuilders = new ArrayList<>();
@@ -130,11 +125,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return builder;
}
- public T pathType(ContentPath.Type pathType) {
- this.pathType = pathType;
- return builder;
- }
-
public T includeInAll(boolean includeInAll) {
this.includeInAll = includeInAll;
return builder;
@@ -147,8 +137,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
@Override
public Y build(BuilderContext context) {
- ContentPath.Type origPathType = context.path().pathType();
- context.path().pathType(pathType);
context.path().add(name);
Map<String, Mapper> mappers = new HashMap<>();
@@ -156,17 +144,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
Mapper mapper = builder.build(context);
mappers.put(mapper.simpleName(), mapper);
}
- context.path().pathType(origPathType);
context.path().remove();
- ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings());
- objectMapper.includeInAllIfNotSet(includeInAll);
+ ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic, mappers, context.indexSettings());
+ objectMapper = objectMapper.includeInAllIfNotSet(includeInAll);
return (Y) objectMapper;
}
- protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, @Nullable Settings settings) {
- return new ObjectMapper(name, fullPath, enabled, nested, dynamic, pathType, mappers);
+ protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
+ return new ObjectMapper(name, fullPath, enabled, nested, dynamic, mappers);
}
}
@@ -179,7 +166,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
Map.Entry<String, Object> entry = iterator.next();
String fieldName = Strings.toUnderscoreCase(entry.getKey());
Object fieldNode = entry.getValue();
- if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder) || parseObjectProperties(name, fieldName, fieldNode, parserContext, builder)) {
+ if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder)) {
iterator.remove();
}
}
@@ -214,14 +201,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return false;
}
- protected static boolean parseObjectProperties(String name, String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) {
- if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
- builder.pathType(parsePathType(name, fieldNode.toString()));
- return true;
- }
- return false;
- }
-
protected static void parseNested(String name, Map<String, Object> node, ObjectMapper.Builder builder) {
boolean nested = false;
boolean nestedIncludeInParent = false;
@@ -326,19 +305,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
private volatile Dynamic dynamic;
- private final ContentPath.Type pathType;
-
private Boolean includeInAll;
private volatile CopyOnWriteHashMap<String, Mapper> mappers;
- ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) {
+ ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers) {
super(name);
this.fullPath = fullPath;
this.enabled = enabled;
this.nested = nested;
this.dynamic = dynamic;
- this.pathType = pathType;
if (mappers == null) {
this.mappers = new CopyOnWriteHashMap<>();
} else {
@@ -380,50 +356,58 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return this.enabled;
}
- public ContentPath.Type pathType() {
- return pathType;
- }
-
public Mapper getMapper(String field) {
return mappers.get(field);
}
@Override
- public void includeInAll(Boolean includeInAll) {
+ public ObjectMapper includeInAll(Boolean includeInAll) {
if (includeInAll == null) {
- return;
+ return this;
}
- this.includeInAll = includeInAll;
+
+ ObjectMapper clone = clone();
+ clone.includeInAll = includeInAll;
// when called from outside, apply this on all the inner mappers
- for (Mapper mapper : mappers.values()) {
+ for (Mapper mapper : clone.mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
- ((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll);
+ clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll));
}
}
+ return clone;
}
@Override
- public void includeInAllIfNotSet(Boolean includeInAll) {
- if (this.includeInAll == null) {
- this.includeInAll = includeInAll;
+ public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) {
+ if (includeInAll == null || this.includeInAll != null) {
+ return this;
}
+
+ ObjectMapper clone = clone();
+ clone.includeInAll = includeInAll;
// when called from outside, apply this on all the inner mappers
- for (Mapper mapper : mappers.values()) {
+ for (Mapper mapper : clone.mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
- ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
+ clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll));
}
}
+ return clone;
}
@Override
- public void unsetIncludeInAll() {
- includeInAll = null;
+ public ObjectMapper unsetIncludeInAll() {
+ if (includeInAll == null) {
+ return this;
+ }
+ ObjectMapper clone = clone();
+ clone.includeInAll = null;
// when called from outside, apply this on all the inner mappers
for (Mapper mapper : mappers.values()) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
- ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll();
+ clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll());
}
}
+ return clone;
}
public Nested nested() {
@@ -434,14 +418,9 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
return this.nestedTypeFilter;
}
- /**
- * Put a new mapper.
- * NOTE: this method must be called under the current {@link DocumentMapper}
- * lock if concurrent updates are expected.
- */
- public void putMapper(Mapper mapper) {
+ protected void putMapper(Mapper mapper) {
if (mapper instanceof AllFieldMapper.IncludeInAll) {
- ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
+ mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll);
}
mappers = mappers.copyAndPut(mapper.simpleName(), mapper);
}
@@ -464,66 +443,45 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
}
@Override
- public void merge(final Mapper mergeWith, final MergeResult mergeResult) {
+ public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
if (!(mergeWith instanceof ObjectMapper)) {
- mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
- return;
+ throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
}
ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;
+ ObjectMapper merged = clone();
+ merged.doMerge(mergeWithObject, updateAllTypes);
+ return merged;
+ }
+ protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) {
if (nested().isNested()) {
- if (!mergeWithObject.nested().isNested()) {
- mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested");
- return;
+ if (!mergeWith.nested().isNested()) {
+ throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested");
}
} else {
- if (mergeWithObject.nested().isNested()) {
- mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested");
- return;
+ if (mergeWith.nested().isNested()) {
+ throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from non-nested to nested");
}
}
- if (!mergeResult.simulate()) {
- if (mergeWithObject.dynamic != null) {
- this.dynamic = mergeWithObject.dynamic;
- }
+ if (mergeWith.dynamic != null) {
+ this.dynamic = mergeWith.dynamic;
}
- doMerge(mergeWithObject, mergeResult);
-
- List<Mapper> mappersToPut = new ArrayList<>();
- List<ObjectMapper> newObjectMappers = new ArrayList<>();
- List<FieldMapper> newFieldMappers = new ArrayList<>();
- for (Mapper mapper : mergeWithObject) {
- Mapper mergeWithMapper = mapper;
+ for (Mapper mergeWithMapper : mergeWith) {
Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName());
+ Mapper merged;
if (mergeIntoMapper == null) {
- // no mapping, simply add it if not simulating
- if (!mergeResult.simulate()) {
- mappersToPut.add(mergeWithMapper);
- MapperUtils.collect(mergeWithMapper, newObjectMappers, newFieldMappers);
- }
- } else if (mergeIntoMapper instanceof MetadataFieldMapper == false) {
+ // no mapping, simply add it
+ merged = mergeWithMapper;
+ } else {
// root mappers can only exist here for backcompat, and are merged in Mapping
- mergeIntoMapper.merge(mergeWithMapper, mergeResult);
+ merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes);
}
- }
- if (!newFieldMappers.isEmpty()) {
- mergeResult.addFieldMappers(newFieldMappers);
- }
- if (!newObjectMappers.isEmpty()) {
- mergeResult.addObjectMappers(newObjectMappers);
- }
- // add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock)
- for (Mapper mapper : mappersToPut) {
- putMapper(mapper);
+ putMapper(merged);
}
}
- protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) {
-
- }
-
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
toXContent(builder, params, null);
@@ -549,9 +507,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll,
if (enabled != Defaults.ENABLED) {
builder.field("enabled", enabled);
}
- if (pathType != Defaults.PATH_TYPE) {
- builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
- }
if (includeInAll != null) {
builder.field("include_in_all", includeInAll);
}
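The ObjectMapper rewrite above is the heart of the patch: merge no longer mutates the mapper under a MergeResult bookkeeping object; it clones itself, merges children recursively (new children are added, same-named children are merged), and returns the merged copy, throwing on any incompatibility such as a nested flip. A self-contained model of that recursion:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Model of the new contract: merge() leaves both inputs untouched and
    // returns a freshly built node.
    class ObjectNodeSketch {
        final String name;
        final Map<String, ObjectNodeSketch> children = new LinkedHashMap<>();

        ObjectNodeSketch(String name) {
            this.name = name;
        }

        ObjectNodeSketch merge(ObjectNodeSketch mergeWith) {
            ObjectNodeSketch merged = copy();
            for (ObjectNodeSketch incoming : mergeWith.children.values()) {
                ObjectNodeSketch existing = merged.children.get(incoming.name);
                ObjectNodeSketch child = existing == null ? incoming : existing.merge(incoming);
                merged.children.put(child.name, child);
            }
            return merged;
        }

        ObjectNodeSketch copy() {
            ObjectNodeSketch copy = new ObjectNodeSketch(name);
            copy.children.putAll(children);
            return copy;
        }
    }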
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java
index a0c989abd7..2fd4e91471 100644
--- a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java
@@ -95,7 +95,7 @@ public class RootObjectMapper extends ObjectMapper {
@Override
- protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, @Nullable Settings settings) {
+ protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) {
assert !nested.isNested();
FormatDateTimeFormatter[] dates = null;
if (dynamicDateTimeFormatters == null) {
@@ -106,7 +106,7 @@ public class RootObjectMapper extends ObjectMapper {
} else {
dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]);
}
- return new RootObjectMapper(name, enabled, dynamic, pathType, mappers,
+ return new RootObjectMapper(name, enabled, dynamic, mappers,
dates,
dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]),
dateDetection, numericDetection);
@@ -196,15 +196,23 @@ public class RootObjectMapper extends ObjectMapper {
private volatile DynamicTemplate dynamicTemplates[];
- RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers,
+ RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map<String, Mapper> mappers,
FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) {
- super(name, name, enabled, Nested.NO, dynamic, pathType, mappers);
+ super(name, name, enabled, Nested.NO, dynamic, mappers);
this.dynamicTemplates = dynamicTemplates;
this.dynamicDateTimeFormatters = dynamicDateTimeFormatters;
this.dateDetection = dateDetection;
this.numericDetection = numericDetection;
}
+ /** Return a copy of this mapper that has the given {@code mapper} as a
+ * sub mapper. */
+ public RootObjectMapper copyAndPutMapper(Mapper mapper) {
+ RootObjectMapper clone = (RootObjectMapper) clone();
+ clone.putMapper(mapper);
+ return clone;
+ }
+
@Override
public ObjectMapper mappingUpdate(Mapper mapper) {
RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper);
@@ -253,25 +261,29 @@ public class RootObjectMapper extends ObjectMapper {
}
@Override
- protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) {
+ public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
+ return (RootObjectMapper) super.merge(mergeWith, updateAllTypes);
+ }
+
+ @Override
+ protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
+ super.doMerge(mergeWith, updateAllTypes);
RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
- if (!mergeResult.simulate()) {
- // merge them
- List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
- for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
- boolean replaced = false;
- for (int i = 0; i < mergedTemplates.size(); i++) {
- if (mergedTemplates.get(i).name().equals(template.name())) {
- mergedTemplates.set(i, template);
- replaced = true;
- }
- }
- if (!replaced) {
- mergedTemplates.add(template);
+ // merge them
+ List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates));
+ for (DynamicTemplate template : mergeWithObject.dynamicTemplates) {
+ boolean replaced = false;
+ for (int i = 0; i < mergedTemplates.size(); i++) {
+ if (mergedTemplates.get(i).name().equals(template.name())) {
+ mergedTemplates.set(i, template);
+ replaced = true;
}
}
- this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
+ if (!replaced) {
+ mergedTemplates.add(template);
+ }
}
+ this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]);
}
@Override
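Dynamic templates still merge by name; only the simulate gate disappears: an incoming template replaces the existing one with the same name, anything new is appended in order. The loop above, reduced to its essentials:

    import java.util.ArrayList;
    import java.util.List;

    final class TemplateMergeSketch {
        static final class Template {
            final String name;
            Template(String name) {
                this.name = name;
            }
        }

        static List<Template> merge(List<Template> current, List<Template> incoming) {
            List<Template> merged = new ArrayList<>(current);
            for (Template template : incoming) {
                boolean replaced = false;
                for (int i = 0; i < merged.size(); i++) {
                    if (merged.get(i).name.equals(template.name)) {
                        merged.set(i, template); // same name: the incoming definition wins
                        replaced = true;
                    }
                }
                if (!replaced) {
                    merged.add(template); // new name: append, preserving order
                }
            }
            return merged;
        }
    }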
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
index 5aad36cd27..454465727b 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java
@@ -31,19 +31,16 @@ import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.geo.SpatialStrategy;
+import org.elasticsearch.common.geo.builders.PointBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
import org.elasticsearch.search.internal.SearchContext;
@@ -61,13 +58,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
public static final String DEFAULT_SHAPE_FIELD_NAME = "shape";
public static final ShapeRelation DEFAULT_SHAPE_RELATION = ShapeRelation.INTERSECTS;
- static final GeoShapeQueryBuilder PROTOTYPE = new GeoShapeQueryBuilder("field", new BytesArray(new byte[1]));
+ static final GeoShapeQueryBuilder PROTOTYPE = new GeoShapeQueryBuilder("field", new PointBuilder());
private final String fieldName;
- // TODO make the ShapeBuilder and subclasses Writable and implement hashCode
- // and Equals so ShapeBuilder can be used here
- private BytesReference shapeBytes;
+ private ShapeBuilder shape;
private SpatialStrategy strategy;
@@ -88,7 +83,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
* @param shape
* Shape used in the Query
*/
- public GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape) throws IOException {
+ public GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape) {
this(fieldName, shape, null, null);
}
@@ -105,37 +100,21 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
* Index type of the indexed Shapes
*/
public GeoShapeQueryBuilder(String fieldName, String indexedShapeId, String indexedShapeType) {
- this(fieldName, (BytesReference) null, indexedShapeId, indexedShapeType);
+ this(fieldName, (ShapeBuilder) null, indexedShapeId, indexedShapeType);
}
- GeoShapeQueryBuilder(String fieldName, BytesReference shapeBytes) {
- this(fieldName, shapeBytes, null, null);
- }
-
- private GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape, String indexedShapeId, String indexedShapeType) throws IOException {
- this(fieldName, new BytesArray(new byte[1]), indexedShapeId, indexedShapeType);
- if (shape != null) {
- this.shapeBytes = shape.buildAsBytes(XContentType.JSON);
- if (this.shapeBytes.length() == 0) {
- throw new IllegalArgumentException("shape must not be empty");
- }
- } else {
- throw new IllegalArgumentException("shape must not be null");
- }
- }
-
- private GeoShapeQueryBuilder(String fieldName, BytesReference shapeBytes, String indexedShapeId, String indexedShapeType) {
+ private GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape, String indexedShapeId, String indexedShapeType) {
if (fieldName == null) {
throw new IllegalArgumentException("fieldName is required");
}
- if ((shapeBytes == null || shapeBytes.length() == 0) && indexedShapeId == null) {
+ if (shape == null && indexedShapeId == null) {
throw new IllegalArgumentException("either shapeBytes or indexedShapeId and indexedShapeType are required");
}
if (indexedShapeId != null && indexedShapeType == null) {
throw new IllegalArgumentException("indexedShapeType is required if indexedShapeId is specified");
}
this.fieldName = fieldName;
- this.shapeBytes = shapeBytes;
+ this.shape = shape;
this.indexedShapeId = indexedShapeId;
this.indexedShapeType = indexedShapeType;
}
@@ -148,10 +127,10 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
}
/**
- * @return the JSON bytes for the shape used in the Query
+ * @return the shape used in the Query
*/
- public BytesReference shapeBytes() {
- return shapeBytes;
+ public ShapeBuilder shape() {
+ return shape;
}
/**
@@ -258,15 +237,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
- ShapeBuilder shape;
- if (shapeBytes == null) {
+ ShapeBuilder shapeToQuery = shape;
+ if (shapeToQuery == null) {
GetRequest getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId);
getRequest.copyContextAndHeadersFrom(SearchContext.current());
- shape = fetch(context.getClient(), getRequest, indexedShapePath);
- } else {
- XContentParser shapeParser = XContentHelper.createParser(shapeBytes);
- shapeParser.nextToken();
- shape = ShapeBuilder.parse(shapeParser);
+ shapeToQuery = fetch(context.getClient(), getRequest, indexedShapePath);
}
MappedFieldType fieldType = context.fieldMapper(fieldName);
if (fieldType == null) {
@@ -291,12 +266,12 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
// in this case, execute disjoint as exists && !intersects
BooleanQuery.Builder bool = new BooleanQuery.Builder();
Query exists = ExistsQueryBuilder.newFilter(context, fieldName);
- Query intersects = strategy.makeQuery(getArgs(shape, ShapeRelation.INTERSECTS));
+ Query intersects = strategy.makeQuery(getArgs(shapeToQuery, ShapeRelation.INTERSECTS));
bool.add(exists, BooleanClause.Occur.MUST);
bool.add(intersects, BooleanClause.Occur.MUST_NOT);
query = new ConstantScoreQuery(bool.build());
} else {
- query = new ConstantScoreQuery(strategy.makeQuery(getArgs(shape, relation)));
+ query = new ConstantScoreQuery(strategy.makeQuery(getArgs(shapeToQuery, relation)));
}
return query;
}
@@ -378,11 +353,9 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
builder.field(GeoShapeQueryParser.STRATEGY_FIELD.getPreferredName(), strategy.getStrategyName());
}
- if (shapeBytes != null) {
+ if (shape != null) {
builder.field(GeoShapeQueryParser.SHAPE_FIELD.getPreferredName());
- XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(shapeBytes);
- parser.nextToken();
- builder.copyCurrentStructure(parser);
+ shape.toXContent(builder, params);
} else {
builder.startObject(GeoShapeQueryParser.INDEXED_SHAPE_FIELD.getPreferredName())
.field(GeoShapeQueryParser.SHAPE_ID_FIELD.getPreferredName(), indexedShapeId)
@@ -412,8 +385,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
String fieldName = in.readString();
GeoShapeQueryBuilder builder;
if (in.readBoolean()) {
- BytesReference shapeBytes = in.readBytesReference();
- builder = new GeoShapeQueryBuilder(fieldName, shapeBytes);
+ builder = new GeoShapeQueryBuilder(fieldName, in.readShape());
} else {
String indexedShapeId = in.readOptionalString();
String indexedShapeType = in.readOptionalString();
@@ -437,10 +409,10 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeString(fieldName);
- boolean hasShapeBytes = shapeBytes != null;
- out.writeBoolean(hasShapeBytes);
- if (hasShapeBytes) {
- out.writeBytesReference(shapeBytes);
+ boolean hasShape = shape != null;
+ out.writeBoolean(hasShape);
+ if (hasShape) {
+ out.writeShape(shape);
} else {
out.writeOptionalString(indexedShapeId);
out.writeOptionalString(indexedShapeType);
@@ -464,14 +436,14 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
&& Objects.equals(indexedShapePath, other.indexedShapePath)
&& Objects.equals(indexedShapeType, other.indexedShapeType)
&& Objects.equals(relation, other.relation)
- && Objects.equals(shapeBytes, other.shapeBytes)
+ && Objects.equals(shape, other.shape)
&& Objects.equals(strategy, other.strategy);
}
@Override
protected int doHashCode() {
return Objects.hash(fieldName, indexedShapeId, indexedShapeIndex,
- indexedShapePath, indexedShapeType, relation, shapeBytes, strategy);
+ indexedShapePath, indexedShapeType, relation, shape, strategy);
}
@Override
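
Holding the shape as a ShapeBuilder means doEquals/doHashCode above compare structured geometry rather than serialized bytes, and doWriteTo streams it through the new writeShape/readShape calls. A hedged usage sketch; the relation(...) setter is assumed from the 'relation' field this class already carries:

    // construct with a structured shape (PointBuilder is the same builder PROTOTYPE uses above)
    GeoShapeQueryBuilder query = new GeoShapeQueryBuilder("location", new PointBuilder());
    query.relation(ShapeRelation.INTERSECTS); // assumed setter; INTERSECTS is the declared default relation
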
diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
index 12add77a88..c1d040f33b 100644
--- a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
+++ b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
@@ -22,11 +22,9 @@ package org.elasticsearch.index.query;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.ShapeRelation;
import org.elasticsearch.common.geo.SpatialStrategy;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
@@ -54,7 +52,7 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> {
String fieldName = null;
ShapeRelation shapeRelation = null;
SpatialStrategy strategy = null;
- BytesReference shape = null;
+ ShapeBuilder shape = null;
String id = null;
String type = null;
@@ -79,8 +77,7 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> {
currentFieldName = parser.currentName();
token = parser.nextToken();
if (parseContext.parseFieldMatcher().match(currentFieldName, SHAPE_FIELD)) {
- XContentBuilder builder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- shape = builder.bytes();
+ shape = ShapeBuilder.parse(parser);
} else if (parseContext.parseFieldMatcher().match(currentFieldName, STRATEGY_FIELD)) {
String strategyName = parser.text();
strategy = SpatialStrategy.fromString(strategyName);
diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java
deleted file mode 100644
index 70d0bb9350..0000000000
--- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.query;
-
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.lucene.search.Queries;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
-import org.elasticsearch.index.mapper.object.ObjectMapper;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Objects;
-
-/**
- * Constructs a filter that have only null values or no value in the original field.
- */
-public class MissingQueryBuilder extends AbstractQueryBuilder<MissingQueryBuilder> {
-
- public static final String NAME = "missing";
-
- public static final boolean DEFAULT_NULL_VALUE = false;
-
- public static final boolean DEFAULT_EXISTENCE_VALUE = true;
-
- private final String fieldPattern;
-
- private final boolean nullValue;
-
- private final boolean existence;
-
- static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE);
-
- /**
- * Constructs a filter that returns documents with only null values or no value in the original field.
- * @param fieldPattern the field to query
- * @param nullValue should the missing filter automatically include fields with null value configured in the
- * mappings. Defaults to <tt>false</tt>.
- * @param existence should the missing filter include documents where the field doesn't exist in the docs.
- * Defaults to <tt>true</tt>.
- * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false
- */
- public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) {
- if (Strings.isEmpty(fieldPattern)) {
- throw new IllegalArgumentException("missing query must be provided with a [field]");
- }
- if (nullValue == false && existence == false) {
- throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true");
- }
- this.fieldPattern = fieldPattern;
- this.nullValue = nullValue;
- this.existence = existence;
- }
-
- public MissingQueryBuilder(String fieldPattern) {
- this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE);
- }
-
- public String fieldPattern() {
- return this.fieldPattern;
- }
-
- /**
- * Returns true if the missing filter will include documents where the field contains a null value, otherwise
- * these documents will not be included.
- */
- public boolean nullValue() {
- return this.nullValue;
- }
-
- /**
- * Returns true if the missing filter will include documents where the field has no values, otherwise
- * these documents will not be included.
- */
- public boolean existence() {
- return this.existence;
- }
-
- @Override
- protected void doXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject(NAME);
- builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern);
- builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue);
- builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence);
- printBoostAndQueryName(builder);
- builder.endObject();
- }
-
- @Override
- public String getWriteableName() {
- return NAME;
- }
-
- @Override
- protected Query doToQuery(QueryShardContext context) throws IOException {
- return newFilter(context, fieldPattern, existence, nullValue);
- }
-
- public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) {
- if (!existence && !nullValue) {
- throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true");
- }
-
- final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
- if (fieldNamesFieldType == null) {
- // can only happen when no types exist, so no docs exist either
- return Queries.newMatchNoDocsQuery();
- }
-
- ObjectMapper objectMapper = context.getObjectMapper(fieldPattern);
- if (objectMapper != null) {
- // automatic make the object mapper pattern
- fieldPattern = fieldPattern + ".*";
- }
-
- Collection<String> fields = context.simpleMatchToIndexNames(fieldPattern);
- if (fields.isEmpty()) {
- if (existence) {
- // if we ask for existence of fields, and we found none, then we should match on all
- return Queries.newMatchAllQuery();
- }
- return null;
- }
-
- Query existenceFilter = null;
- Query nullFilter = null;
-
- if (existence) {
- BooleanQuery.Builder boolFilter = new BooleanQuery.Builder();
- for (String field : fields) {
- MappedFieldType fieldType = context.fieldMapper(field);
- Query filter = null;
- if (fieldNamesFieldType.isEnabled()) {
- final String f;
- if (fieldType != null) {
- f = fieldType.names().indexName();
- } else {
- f = field;
- }
- filter = fieldNamesFieldType.termQuery(f, context);
- }
- // if _field_names are not indexed, we need to go the slow way
- if (filter == null && fieldType != null) {
- filter = fieldType.rangeQuery(null, null, true, true);
- }
- if (filter == null) {
- filter = new TermRangeQuery(field, null, null, true, true);
- }
- boolFilter.add(filter, BooleanClause.Occur.SHOULD);
- }
-
- existenceFilter = boolFilter.build();
- existenceFilter = Queries.not(existenceFilter);;
- }
-
- if (nullValue) {
- for (String field : fields) {
- MappedFieldType fieldType = context.fieldMapper(field);
- if (fieldType != null) {
- nullFilter = fieldType.nullValueQuery();
- }
- }
- }
-
- Query filter;
- if (nullFilter != null) {
- if (existenceFilter != null) {
- filter = new BooleanQuery.Builder()
- .add(existenceFilter, BooleanClause.Occur.SHOULD)
- .add(nullFilter, BooleanClause.Occur.SHOULD)
- .build();
- } else {
- filter = nullFilter;
- }
- } else {
- filter = existenceFilter;
- }
-
- if (filter == null) {
- return null;
- }
-
- return new ConstantScoreQuery(filter);
- }
-
- @Override
- protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException {
- return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean());
- }
-
- @Override
- protected void doWriteTo(StreamOutput out) throws IOException {
- out.writeString(fieldPattern);
- out.writeBoolean(nullValue);
- out.writeBoolean(existence);
- }
-
- @Override
- protected int doHashCode() {
- return Objects.hash(fieldPattern, nullValue, existence);
- }
-
- @Override
- protected boolean doEquals(MissingQueryBuilder other) {
- return Objects.equals(fieldPattern, other.fieldPattern) &&
- Objects.equals(nullValue, other.nullValue) &&
- Objects.equals(existence, other.existence);
- }
-}
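
The missing query is removed outright rather than reworked; the same matches can be produced by negating an exists query, whose factory this change leaves in place in QueryBuilders. A minimal sketch with the surviving factories:

    // "field has no value" expressed as must_not(exists), replacing missingQuery("user")
    QueryBuilder noUser = QueryBuilders.boolQuery()
            .mustNot(QueryBuilders.existsQuery("user"));
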
diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java
deleted file mode 100644
index 467971b65c..0000000000
--- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.query;
-
-import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.common.xcontent.XContentParser;
-
-import java.io.IOException;
-
-/**
- * Parser for missing query
- */
-public class MissingQueryParser implements QueryParser<MissingQueryBuilder> {
-
- public static final ParseField FIELD_FIELD = new ParseField("field");
- public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value");
- public static final ParseField EXISTENCE_FIELD = new ParseField("existence");
-
- @Override
- public String[] names() {
- return new String[]{MissingQueryBuilder.NAME};
- }
-
- @Override
- public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
- XContentParser parser = parseContext.parser();
-
- String fieldPattern = null;
- String queryName = null;
- float boost = AbstractQueryBuilder.DEFAULT_BOOST;
- boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE;
- boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE;
-
- XContentParser.Token token;
- String currentFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (token.isValue()) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) {
- fieldPattern = parser.text();
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) {
- nullValue = parser.booleanValue();
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) {
- existence = parser.booleanValue();
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
- queryName = parser.text();
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
- boost = parser.floatValue();
- } else {
- throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
- }
- } else {
- throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
- }
- }
-
- if (fieldPattern == null) {
- throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]");
- }
- return new MissingQueryBuilder(fieldPattern, nullValue, existence)
- .boost(boost)
- .queryName(queryName);
- }
-
- @Override
- public MissingQueryBuilder getBuilderPrototype() {
- return MissingQueryBuilder.PROTOTYPE;
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
index 45f97d68c3..3fb0967920 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java
@@ -810,27 +810,6 @@ public abstract class QueryBuilders {
return new ExistsQueryBuilder(name);
}
- /**
- * A filter to filter only documents where a field does not exists in them.
- * @param name the field to query
- */
- public static MissingQueryBuilder missingQuery(String name) {
- return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE);
- }
-
- /**
- * A filter to filter only documents where a field does not exists in them.
- * @param name the field to query
- * @param nullValue should the missing filter automatically include fields with null value configured in the
- * mappings. Defaults to <tt>false</tt>.
- * @param existence should the missing filter include documents where the field doesn't exist in the docs.
- * Defaults to <tt>true</tt>.
- * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false
- */
- public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) {
- return new MissingQueryBuilder(name, nullValue, existence);
- }
-
private QueryBuilders() {
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
index 65dfb559e3..faf482ead9 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java
@@ -63,6 +63,7 @@ import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -364,7 +365,7 @@ public class QueryShardContext {
* Executes the given template, and returns the response.
*/
public BytesReference executeQueryTemplate(Template template, SearchContext searchContext) {
- ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext);
+ ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext, Collections.emptyMap());
return (BytesReference) executable.run();
}
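
The extra argument is the map of compile-time script parameters; call sites with none now pass Collections.emptyMap() instead of null. A hedged sketch of a caller that does have such parameters (the key name is illustrative, and the map's value type is assumed to be String here):

    // illustrative compile-time params for the template engine; use emptyMap() when there are none
    Map<String, String> compileParams = Collections.singletonMap("content_type", "application/json");
    ExecutableScript executable =
            scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext, compileParams);
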
diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java
index f69ac8c054..6f14f15d3f 100644
--- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java
@@ -33,6 +33,7 @@ import org.elasticsearch.script.Script.ScriptField;
import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
+import java.util.Collections;
import java.util.Objects;
public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder> {
@@ -80,7 +81,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
public ScriptQuery(Script script, ScriptService scriptService, SearchLookup searchLookup) {
this.script = script;
- this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH);
+ this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
}
@Override
@@ -161,4 +162,4 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
protected boolean doEquals(ScriptQueryBuilder other) {
return Objects.equals(script, other.script);
}
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java
index c528c0007f..e7ce9b90e2 100644
--- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java
+++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java
@@ -19,13 +19,11 @@
package org.elasticsearch.index.query.functionscore;
-import java.util.Map;
-
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.XContentLocation;
-import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser;
import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser;
import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser;
@@ -74,11 +72,12 @@ public class ScoreFunctionParserMapper {
return functionParsers.get(parserName);
}
- private static void addParser(ScoreFunctionParser<?> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) {
+ private static void addParser(ScoreFunctionParser<? extends ScoreFunctionBuilder> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) {
for (String name : scoreFunctionParser.getNames()) {
map.put(name, scoreFunctionParser);
}
- namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, scoreFunctionParser.getBuilderPrototype());
+ @SuppressWarnings("unchecked") NamedWriteable<? extends ScoreFunctionBuilder> sfb = scoreFunctionParser.getBuilderPrototype();
+ namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb);
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java
index 9230846631..5fcd70b65d 100644
--- a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java
+++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java
@@ -33,6 +33,7 @@ import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;
import java.io.IOException;
+import java.util.Collections;
import java.util.Objects;
/**
@@ -89,10 +90,10 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder<ScriptScore
@Override
protected ScoreFunction doToFunction(QueryShardContext context) {
try {
- SearchScript searchScript = context.getScriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH);
+ SearchScript searchScript = context.getScriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
return new ScriptScoreFunction(script, searchScript);
} catch (Exception e) {
throw new QueryShardException(context, "script_score: the script could not be loaded", e);
}
}
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index c0bf924467..a67ca309bc 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -19,7 +19,11 @@
package org.elasticsearch.index.shard;
-import org.apache.lucene.index.*;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.index.Term;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
@@ -61,7 +65,16 @@ import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.ShardRequestCache;
import org.elasticsearch.index.codec.CodecService;
-import org.elasticsearch.index.engine.*;
+import org.elasticsearch.index.engine.CommitStats;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineClosedException;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.engine.RefreshFailedEngineException;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.fielddata.ShardFieldData;
@@ -70,7 +83,12 @@ import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.get.ShardGetService;
import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.indexing.ShardIndexingService;
-import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperForType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
@@ -108,7 +126,12 @@ import java.io.IOException;
import java.io.PrintStream;
import java.nio.channels.ClosedByInterruptException;
import java.nio.charset.StandardCharsets;
-import java.util.*;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
@@ -151,7 +174,6 @@ public class IndexShard extends AbstractIndexShardComponent {
private TimeValue refreshInterval;
private volatile ScheduledFuture<?> refreshScheduledFuture;
- private volatile ScheduledFuture<?> mergeScheduleFuture;
protected volatile ShardRouting shardRouting;
protected volatile IndexShardState state;
protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
@@ -766,8 +788,6 @@ public class IndexShard extends AbstractIndexShardComponent {
if (state != IndexShardState.CLOSED) {
FutureUtils.cancel(refreshScheduledFuture);
refreshScheduledFuture = null;
- FutureUtils.cancel(mergeScheduleFuture);
- mergeScheduleFuture = null;
}
changeState(IndexShardState.CLOSED, reason);
indexShardOperationCounter.decRef();
@@ -1099,7 +1119,8 @@ public class IndexShard extends AbstractIndexShardComponent {
// we are the first primary, recover from the gateway
// if its post api allocation, the index should exists
assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
- final boolean shouldExist = shardRouting.allocatedPostIndexCreate();
+ boolean shouldExist = shardRouting.allocatedPostIndexCreate(idxSettings.getIndexMetaData());
+
StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
return storeRecovery.recoverFromStore(this, shouldExist, localNode);
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java
index ac46f6725d..88e55600bc 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java
@@ -110,7 +110,7 @@ public class TranslogRecoveryPerformer {
if (currentUpdate == null) {
recoveredTypes.put(type, update);
} else {
- MapperUtils.merge(currentUpdate, update);
+ currentUpdate = currentUpdate.merge(update, false);
}
}
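
merge(update, false) returns the combined mapping instead of mutating the receiver, which is why the result is assigned back. A sketch of that contract; Mapping is assumed as the map's value type, and the boolean is taken to be the updateAllTypes flag:

    // merge no longer mutates: capture the returned mapping and store it back
    Mapping merged = currentUpdate.merge(update, false); // false: do not update all types (assumed flag meaning)
    recoveredTypes.put(type, merged);                    // hypothetical store-back keeping the map current
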
diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
index 1bd023abdb..ed56187673 100644
--- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java
@@ -21,37 +21,36 @@ package org.elasticsearch.index.store;
import org.apache.lucene.store.StoreRateLimiting;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
* IndexStoreConfig encapsulates node / cluster level configuration for index level {@link IndexStore} instances.
* For instance it maintains the node level rate limiter configuration: updates to the cluster that disable or enable
- * {@value #INDICES_STORE_THROTTLE_TYPE} or {@value #INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC} are reflected immediately
+ * <tt>indices.store.throttle.type</tt> or <tt>indices.store.throttle.max_bytes_per_sec</tt> are reflected immediately
* on all referencing {@link IndexStore} instances
*/
-public class IndexStoreConfig implements NodeSettingsService.Listener {
+public class IndexStoreConfig {
/**
* Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}.
*/
- public static final String INDICES_STORE_THROTTLE_TYPE = "indices.store.throttle.type";
+ public static final Setting<StoreRateLimiting.Type> INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(), StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER);
/**
* Configures the node / cluster level throttle intensity. The default is <tt>10240 MB</tt>
*/
- public static final String INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC = "indices.store.throttle.max_bytes_per_sec";
- private volatile String rateLimitingType;
+ public static final Setting<ByteSizeValue> INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER);
+ private volatile StoreRateLimiting.Type rateLimitingType;
private volatile ByteSizeValue rateLimitingThrottle;
private final StoreRateLimiting rateLimiting = new StoreRateLimiting();
private final ESLogger logger;
public IndexStoreConfig(Settings settings) {
logger = Loggers.getLogger(IndexStoreConfig.class, settings);
// we don't limit by default (we default to CMS's auto throttle instead):
- this.rateLimitingType = settings.get("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name());
+ this.rateLimitingType = INDICES_STORE_THROTTLE_TYPE_SETTING.get(settings);
rateLimiting.setType(rateLimitingType);
- this.rateLimitingThrottle = settings.getAsBytesSize("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0));
+ this.rateLimitingThrottle = INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.get(settings);
rateLimiting.setMaxRate(rateLimitingThrottle);
logger.debug("using indices.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle);
}
@@ -63,22 +62,12 @@ public class IndexStoreConfig implements NodeSettingsService.Listener {
return rateLimiting;
}
- @Override
- public void onRefreshSettings(Settings settings) {
- String rateLimitingType = settings.get(INDICES_STORE_THROTTLE_TYPE, this.rateLimitingType);
- // try and parse the type
- StoreRateLimiting.Type.fromString(rateLimitingType);
- if (!rateLimitingType.equals(this.rateLimitingType)) {
- logger.info("updating indices.store.throttle.type from [{}] to [{}]", this.rateLimitingType, rateLimitingType);
- this.rateLimitingType = rateLimitingType;
- this.rateLimiting.setType(rateLimitingType);
- }
+ public void setRateLimitingType(StoreRateLimiting.Type rateLimitingType) {
+ this.rateLimitingType = rateLimitingType;
+ rateLimiting.setType(rateLimitingType);
+ }
- ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, this.rateLimitingThrottle);
- if (!rateLimitingThrottle.equals(this.rateLimitingThrottle)) {
- logger.info("updating indices.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", this.rateLimitingThrottle, rateLimitingThrottle, this.rateLimitingType);
- this.rateLimitingThrottle = rateLimitingThrottle;
- this.rateLimiting.setMaxRate(rateLimitingThrottle);
- }
+ public void setRateLimitingThrottle(ByteSizeValue rateLimitingThrottle) {
+ this.rateLimitingThrottle = rateLimitingThrottle;
}
}
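
With the listener interface gone, dynamic updates flow through consumers registered against the new Setting constants. A hedged wiring sketch, assuming a ClusterSettings registry that exposes addSettingsUpdateConsumer:

    // register once at node startup; each consumer fires when its setting changes (sketch)
    clusterSettings.addSettingsUpdateConsumer(
            IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
    clusterSettings.addSettingsUpdateConsumer(
            IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
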
diff --git a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java
index 6026468973..a2eb0bff64 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java
@@ -48,22 +48,27 @@ public final class BufferingTranslogWriter extends TranslogWriter {
public Translog.Location add(BytesReference data) throws IOException {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
- operationCounter++;
final long offset = totalOffset;
if (data.length() >= buffer.length) {
flush();
// we use the channel to write, since on windows, writing to the RAF might not be reflected
// when reading through the channel
- data.writeTo(channel);
+ try {
+ data.writeTo(channel);
+ } catch (Throwable ex) {
+ closeWithTragicEvent(ex);
+ throw ex;
+ }
writtenOffset += data.length();
totalOffset += data.length();
- return new Translog.Location(generation, offset, data.length());
- }
- if (data.length() > buffer.length - bufferCount) {
- flush();
+ } else {
+ if (data.length() > buffer.length - bufferCount) {
+ flush();
+ }
+ data.writeTo(bufferOs);
+ totalOffset += data.length();
}
- data.writeTo(bufferOs);
- totalOffset += data.length();
+ operationCounter++;
return new Translog.Location(generation, offset, data.length());
}
}
@@ -71,10 +76,17 @@ public final class BufferingTranslogWriter extends TranslogWriter {
protected final void flush() throws IOException {
assert writeLock.isHeldByCurrentThread();
if (bufferCount > 0) {
+ ensureOpen();
// we use the channel to write, since on windows, writing to the RAF might not be reflected
// when reading through the channel
- Channels.writeToChannel(buffer, 0, bufferCount, channel);
- writtenOffset += bufferCount;
+ final int bufferSize = bufferCount;
+ try {
+ Channels.writeToChannel(buffer, 0, bufferSize, channel);
+ } catch (Throwable ex) {
+ closeWithTragicEvent(ex);
+ throw ex;
+ }
+ writtenOffset += bufferSize;
bufferCount = 0;
}
}
@@ -102,20 +114,28 @@ public final class BufferingTranslogWriter extends TranslogWriter {
}
@Override
- public void sync() throws IOException {
- if (!syncNeeded()) {
- return;
- }
- synchronized (this) {
+ public synchronized void sync() throws IOException {
+ if (syncNeeded()) {
+ ensureOpen(); // this call gives a better exception than the incRef would if we are closed by a tragic event
channelReference.incRef();
try {
+ final long offsetToSync;
+ final int opsCounter;
try (ReleasableLock lock = writeLock.acquire()) {
flush();
- lastSyncedOffset = totalOffset;
+ offsetToSync = totalOffset;
+ opsCounter = operationCounter;
}
// we can do this outside of the write lock but we have to protect from
// concurrent syncs
- checkpoint(lastSyncedOffset, operationCounter, channelReference);
+ ensureOpen(); // not strictly required - the checkpoint happens or not either way - but surfaces a better exception
+ try {
+ checkpoint(offsetToSync, opsCounter, channelReference);
+ } catch (Throwable ex) {
+ closeWithTragicEvent(ex);
+ throw ex;
+ }
+ lastSyncedOffset = offsetToSync;
} finally {
channelReference.decRef();
}
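
Every write path in this file now follows the same pattern: attempt the I/O, record any Throwable as the writer's tragic exception, close, and rethrow. The pattern in isolation (Java 7 precise rethrow lets 'throw ex' stay typed as IOException to callers because the try block only throws IOException):

    try {
        data.writeTo(channel);      // any failure here poisons the writer
    } catch (Throwable ex) {
        closeWithTragicEvent(ex);   // record the tragedy, then close
        throw ex;                   // precise rethrow: still an IOException to callers
    }
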
diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java
index 35dd895bc2..fd5c64f96a 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -115,7 +115,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private final Path location;
private TranslogWriter current;
private volatile ImmutableTranslogReader currentCommittingTranslog;
- private long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion.
+ private volatile long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause a translog deletion.
private final AtomicBoolean closed = new AtomicBoolean();
private final TranslogConfig config;
private final String translogUUID;
@@ -158,7 +158,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try {
if (translogGeneration != null) {
- final Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME));
+ final Checkpoint checkpoint = readCheckpoint();
this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint);
if (recoveredTranslogs.isEmpty()) {
throw new IllegalStateException("at least one reader must be recovered");
@@ -279,7 +279,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
}
- boolean isOpen() {
+ /** Returns {@code true} if this {@code Translog} is still open. */
+ public boolean isOpen() {
return closed.get() == false;
}
@@ -288,10 +289,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (closed.compareAndSet(false, true)) {
try (ReleasableLock lock = writeLock.acquire()) {
try {
- IOUtils.close(current, currentCommittingTranslog);
+ current.sync();
} finally {
- IOUtils.close(recoveredTranslogs);
- recoveredTranslogs.clear();
+ try {
+ IOUtils.close(current, currentCommittingTranslog);
+ } finally {
+ IOUtils.close(recoveredTranslogs);
+ recoveredTranslogs.clear();
+ }
}
} finally {
FutureUtils.cancel(syncScheduler);
@@ -354,7 +359,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
TranslogWriter createWriter(long fileGeneration) throws IOException {
TranslogWriter newFile;
try {
- newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize());
+ newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize(), getChannelFactory());
} catch (IOException e) {
throw new TranslogException(shardId, "failed to create new translog file", e);
}
@@ -393,7 +398,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* @see Index
* @see org.elasticsearch.index.translog.Translog.Delete
*/
- public Location add(Operation operation) throws TranslogException {
+ public Location add(Operation operation) throws IOException {
final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays);
try {
final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);
@@ -415,9 +420,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
assert current.assertBytesAtLocation(location, bytes);
return location;
}
- } catch (AlreadyClosedException ex) {
+ } catch (AlreadyClosedException | IOException ex) {
+ closeOnTragicEvent(ex);
throw ex;
} catch (Throwable e) {
+ closeOnTragicEvent(e);
throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);
} finally {
Releasables.close(out.bytes());
@@ -429,6 +436,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* Snapshots are fixed in time and will not be updated with future operations.
*/
public Snapshot newSnapshot() {
+ ensureOpen();
try (ReleasableLock lock = readLock.acquire()) {
ArrayList<TranslogReader> toOpen = new ArrayList<>();
toOpen.addAll(recoveredTranslogs);
@@ -493,6 +501,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (closed.get() == false) {
current.sync();
}
+ } catch (Throwable ex) {
+ closeOnTragicEvent(ex);
+ throw ex;
}
}
@@ -520,12 +531,26 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public boolean ensureSynced(Location location) throws IOException {
try (ReleasableLock lock = readLock.acquire()) {
if (location.generation == current.generation) { // if we have a new one it's already synced
+ ensureOpen();
return current.syncUpTo(location.translogLocation + location.size);
}
+ } catch (Throwable ex) {
+ closeOnTragicEvent(ex);
+ throw ex;
}
return false;
}
+ private void closeOnTragicEvent(Throwable ex) {
+ if (current.getTragicException() != null) {
+ try {
+ close();
+ } catch (Exception inner) {
+ ex.addSuppressed(inner);
+ }
+ }
+ }
+
/**
* return stats
*/
@@ -548,31 +573,29 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private final class OnCloseRunnable implements Callback<ChannelReference> {
@Override
public void handle(ChannelReference channelReference) {
- try (ReleasableLock lock = writeLock.acquire()) {
- if (isReferencedGeneration(channelReference.getGeneration()) == false) {
- Path translogPath = channelReference.getPath();
- assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
- // if the given translogPath is not the current we can safely delete the file since all references are released
- logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
- IOUtils.deleteFilesIgnoringExceptions(translogPath);
- IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
+ if (isReferencedGeneration(channelReference.getGeneration()) == false) {
+ Path translogPath = channelReference.getPath();
+ assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
+ // if the given translogPath is not the current we can safely delete the file since all references are released
+ logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
+ IOUtils.deleteFilesIgnoringExceptions(translogPath);
+ IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
- }
- try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
- for (Path path : stream) {
- Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
- if (matcher.matches()) {
- long generation = Long.parseLong(matcher.group(1));
- if (isReferencedGeneration(generation) == false) {
- logger.trace("delete translog file - not referenced and not current anymore {}", path);
- IOUtils.deleteFilesIgnoringExceptions(path);
- IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
- }
+ }
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
+ for (Path path : stream) {
+ Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
+ if (matcher.matches()) {
+ long generation = Long.parseLong(matcher.group(1));
+ if (isReferencedGeneration(generation) == false) {
+ logger.trace("delete translog file - not referenced and not current anymore {}", path);
+ IOUtils.deleteFilesIgnoringExceptions(path);
+ IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
}
}
- } catch (IOException e) {
- logger.warn("failed to delete unreferenced translog files", e);
}
+ } catch (IOException e) {
+ logger.warn("failed to delete unreferenced translog files", e);
}
}
}
@@ -1294,6 +1317,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
throw new IllegalStateException("already committing a translog with generation: " + currentCommittingTranslog.getGeneration());
}
final TranslogWriter oldCurrent = current;
+ oldCurrent.ensureOpen();
oldCurrent.sync();
currentCommittingTranslog = current.immutableReader();
Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME);
@@ -1389,7 +1413,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private void ensureOpen() {
if (closed.get()) {
- throw new AlreadyClosedException("translog is already closed");
+ throw new AlreadyClosedException("translog is already closed", current.getTragicException());
}
}
@@ -1400,4 +1424,20 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return outstandingViews.size();
}
+ TranslogWriter.ChannelFactory getChannelFactory() {
+ return TranslogWriter.ChannelFactory.DEFAULT;
+ }
+
+ /** If this {@code Translog} was closed as a side-effect of a tragic exception,
+ * e.g. disk full while flushing a new segment, this returns the root cause exception.
+ * Otherwise (no tragic exception has occurred) it returns null. */
+ public Throwable getTragicException() {
+ return current.getTragicException();
+ }
+
+ /** Reads and returns the current checkpoint */
+ final Checkpoint readCheckpoint() throws IOException {
+ return Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME));
+ }
+
}
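
closeOnTragicEvent and the suppressed-exception handling above let callers tell a routine close apart from an I/O catastrophe. A hedged sketch of a caller reacting to the distinction (failEngine is a hypothetical handler, not part of this change):

    try {
        translog.add(operation);
    } catch (AlreadyClosedException | IOException ex) {
        Throwable tragedy = translog.getTragicException();
        if (tragedy != null) {
            failEngine("translog tragic event", tragedy); // hypothetical: fail the owning engine/shard
        }
        throw ex;
    }
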
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
index 590bc31905..d7077fd90a 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
@@ -140,16 +140,16 @@ public abstract class TranslogReader implements Closeable, Comparable<TranslogRe
@Override
public void close() throws IOException {
if (closed.compareAndSet(false, true)) {
- doClose();
+ channelReference.decRef();
}
}
- protected void doClose() throws IOException {
- channelReference.decRef();
+ protected final boolean isClosed() {
+ return closed.get();
}
protected void ensureOpen() {
- if (closed.get()) {
+ if (isClosed()) {
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed");
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
index 045550cb62..9870bddf87 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
@@ -20,6 +20,7 @@
package org.elasticsearch.index.translog;
import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
@@ -54,6 +55,9 @@ public class TranslogWriter extends TranslogReader {
protected volatile int operationCounter;
/* the offset in bytes written to the file */
protected volatile long writtenOffset;
+ /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */
+ private volatile Throwable tragedy;
+
public TranslogWriter(ShardId shardId, long generation, ChannelReference channelReference) throws IOException {
super(generation, channelReference, channelReference.getChannel().position());
@@ -65,10 +69,10 @@ public class TranslogWriter extends TranslogReader {
this.lastSyncedOffset = channelReference.getChannel().position();
}
- public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize) throws IOException {
+ public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize, ChannelFactory channelFactory) throws IOException {
final BytesRef ref = new BytesRef(translogUUID);
final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT;
- final FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
+ final FileChannel channel = channelFactory.open(file);
try {
// This OutputStreamDataOutput is intentionally not closed because
// closing it will close the FileChannel
@@ -90,6 +94,12 @@ public class TranslogWriter extends TranslogReader {
throw throwable;
}
}
+ /** If this {@code TranslogWriter} was closed as a side-effect of a tragic exception,
+ * e.g. disk full while flushing a new segment, this returns the root cause exception.
+ * Otherwise (no tragic exception has occurred) it returns null. */
+ public Throwable getTragicException() {
+ return tragedy;
+ }
public enum Type {
@@ -118,6 +128,16 @@ public class TranslogWriter extends TranslogReader {
}
}
+ protected final void closeWithTragicEvent(Throwable throwable) throws IOException {
+ try (ReleasableLock lock = writeLock.acquire()) {
+ if (tragedy == null) {
+ tragedy = throwable;
+ } else {
+ tragedy.addSuppressed(throwable);
+ }
+ close();
+ }
+ }
/**
* add the given bytes to the translog and return the location they were written at
@@ -127,9 +147,14 @@ public class TranslogWriter extends TranslogReader {
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
position = writtenOffset;
- data.writeTo(channel);
+ try {
+ data.writeTo(channel);
+ } catch (Throwable e) {
+ closeWithTragicEvent(e);
+ throw e;
+ }
writtenOffset = writtenOffset + data.length();
- operationCounter = operationCounter + 1;
+ operationCounter++;
}
return new Translog.Location(generation, position, data.length());
}
@@ -143,12 +168,13 @@ public class TranslogWriter extends TranslogReader {
/**
* write all buffered ops to disk and fsync file
*/
- public void sync() throws IOException {
+ public synchronized void sync() throws IOException { // synchronized to ensure only one sync happens a time
// check if we really need to sync here...
if (syncNeeded()) {
try (ReleasableLock lock = writeLock.acquire()) {
+ ensureOpen();
+ checkpoint(writtenOffset, operationCounter, channelReference);
lastSyncedOffset = writtenOffset;
- checkpoint(lastSyncedOffset, operationCounter, channelReference);
}
}
}
@@ -263,15 +289,6 @@ public class TranslogWriter extends TranslogReader {
}
@Override
- protected final void doClose() throws IOException {
- try (ReleasableLock lock = writeLock.acquire()) {
- sync();
- } finally {
- super.doClose();
- }
- }
-
- @Override
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
try (ReleasableLock lock = readLock.acquire()) {
Channels.readFromFileChannelWithEofException(channel, position, buffer);
@@ -288,4 +305,20 @@ public class TranslogWriter extends TranslogReader {
Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation);
Checkpoint.write(checkpointFile, checkpoint, options);
}
+
+ static class ChannelFactory {
+
+ static final ChannelFactory DEFAULT = new ChannelFactory();
+
+ // only for testing until we have a disk-full FileSystem
+ public FileChannel open(Path file) throws IOException {
+ return FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
+ }
+ }
+
+ protected final void ensureOpen() {
+ if (isClosed()) {
+ throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy);
+ }
+ }
}
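
ChannelFactory is the testing seam the comment above calls out; since it and getChannelFactory() are package-private, a same-package test can inject failures. A hedged sketch that simulates a full disk at file creation (the Translog(config) constructor shape is assumed):

    // test-only factory: refuse to open the next translog file
    TranslogWriter.ChannelFactory failOnOpen = new TranslogWriter.ChannelFactory() {
        @Override
        public FileChannel open(Path file) throws IOException {
            throw new IOException("simulated disk full");
        }
    };

    // a same-package test Translog overrides the hook added in Translog above
    Translog translog = new Translog(config) {
        @Override
        TranslogWriter.ChannelFactory getChannelFactory() {
            return failOnOpen;
        }
    };
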
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
index cdd7f05033..61210bb041 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -22,18 +22,86 @@ package org.elasticsearch.indices;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.geo.ShapesAvailability;
+import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MetadataFieldMapper;
-import org.elasticsearch.index.mapper.core.*;
+import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
+import org.elasticsearch.index.mapper.core.BooleanFieldMapper;
+import org.elasticsearch.index.mapper.core.ByteFieldMapper;
+import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
+import org.elasticsearch.index.mapper.core.DateFieldMapper;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.FloatFieldMapper;
+import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
+import org.elasticsearch.index.mapper.core.LongFieldMapper;
+import org.elasticsearch.index.mapper.core.ShortFieldMapper;
+import org.elasticsearch.index.mapper.core.StringFieldMapper;
+import org.elasticsearch.index.mapper.core.TokenCountFieldMapper;
+import org.elasticsearch.index.mapper.core.TypeParsers;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper;
-import org.elasticsearch.index.mapper.internal.*;
+import org.elasticsearch.index.mapper.internal.AllFieldMapper;
+import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
+import org.elasticsearch.index.mapper.internal.IdFieldMapper;
+import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
+import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
+import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
+import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
+import org.elasticsearch.index.mapper.internal.UidFieldMapper;
+import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.query.*;
+import org.elasticsearch.index.query.BoolQueryParser;
+import org.elasticsearch.index.query.BoostingQueryParser;
+import org.elasticsearch.index.query.CommonTermsQueryParser;
+import org.elasticsearch.index.query.ConstantScoreQueryParser;
+import org.elasticsearch.index.query.DisMaxQueryParser;
+import org.elasticsearch.index.query.ExistsQueryParser;
+import org.elasticsearch.index.query.FieldMaskingSpanQueryParser;
+import org.elasticsearch.index.query.FuzzyQueryParser;
+import org.elasticsearch.index.query.GeoBoundingBoxQueryParser;
+import org.elasticsearch.index.query.GeoDistanceQueryParser;
+import org.elasticsearch.index.query.GeoDistanceRangeQueryParser;
+import org.elasticsearch.index.query.GeoPolygonQueryParser;
+import org.elasticsearch.index.query.GeoShapeQueryParser;
+import org.elasticsearch.index.query.GeohashCellQuery;
+import org.elasticsearch.index.query.HasChildQueryParser;
+import org.elasticsearch.index.query.HasParentQueryParser;
+import org.elasticsearch.index.query.IdsQueryParser;
+import org.elasticsearch.index.query.IndicesQueryParser;
+import org.elasticsearch.index.query.MatchAllQueryParser;
+import org.elasticsearch.index.query.MatchNoneQueryParser;
+import org.elasticsearch.index.query.MatchQueryParser;
+import org.elasticsearch.index.query.MoreLikeThisQueryParser;
+import org.elasticsearch.index.query.MultiMatchQueryParser;
+import org.elasticsearch.index.query.NestedQueryParser;
+import org.elasticsearch.index.query.PrefixQueryParser;
+import org.elasticsearch.index.query.QueryParser;
+import org.elasticsearch.index.query.QueryStringQueryParser;
+import org.elasticsearch.index.query.RangeQueryParser;
+import org.elasticsearch.index.query.RegexpQueryParser;
+import org.elasticsearch.index.query.ScriptQueryParser;
+import org.elasticsearch.index.query.SimpleQueryStringParser;
+import org.elasticsearch.index.query.SpanContainingQueryParser;
+import org.elasticsearch.index.query.SpanFirstQueryParser;
+import org.elasticsearch.index.query.SpanMultiTermQueryParser;
+import org.elasticsearch.index.query.SpanNearQueryParser;
+import org.elasticsearch.index.query.SpanNotQueryParser;
+import org.elasticsearch.index.query.SpanOrQueryParser;
+import org.elasticsearch.index.query.SpanTermQueryParser;
+import org.elasticsearch.index.query.SpanWithinQueryParser;
+import org.elasticsearch.index.query.TemplateQueryParser;
+import org.elasticsearch.index.query.TermQueryParser;
+import org.elasticsearch.index.query.TermsQueryParser;
+import org.elasticsearch.index.query.TypeQueryParser;
+import org.elasticsearch.index.query.WildcardQueryParser;
+import org.elasticsearch.index.query.WrapperQueryParser;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
@@ -120,10 +188,9 @@ public class IndicesModule extends AbstractModule {
registerQueryParser(GeohashCellQuery.Parser.class);
registerQueryParser(GeoPolygonQueryParser.class);
registerQueryParser(ExistsQueryParser.class);
- registerQueryParser(MissingQueryParser.class);
registerQueryParser(MatchNoneQueryParser.class);
- if (ShapesAvailability.JTS_AVAILABLE) {
+ if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerQueryParser(GeoShapeQueryParser.class);
}
}
@@ -147,7 +214,7 @@ public class IndicesModule extends AbstractModule {
registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser());
registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser());
- if (ShapesAvailability.JTS_AVAILABLE) {
+ if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser());
}
}
@@ -219,6 +286,7 @@ public class IndicesModule extends AbstractModule {
bind(IndicesFieldDataCacheListener.class).asEagerSingleton();
bind(TermVectorsService.class).asEagerSingleton();
bind(NodeServicesProvider.class).asEagerSingleton();
+ bind(ShapeBuilderRegistry.class).asEagerSingleton();
}
// public for testing
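Besides expanding the wildcard imports, this hunk tightens the geo registrations: geo_shape support needs both JTS and Spatial4j on the classpath, so checking JTS alone could register a parser that fails with linkage errors at query time. A sketch of the availability-probe idiom behind such flags, assuming probe class names for the two optional jars:

    // Sketch: compute availability once at class-load time, then guard every
    // registration on *all* required jars, as the JTS/Spatial4j check above does.
    // The probe class names below are assumptions about the optional dependencies.
    final class GeoAvailability {
        static final boolean JTS_AVAILABLE = isPresent("com.vividsolutions.jts.geom.Geometry");
        static final boolean SPATIAL4J_AVAILABLE = isPresent("com.spatial4j.core.shape.Shape");

        private static boolean isPresent(String className) {
            try {
                Class.forName(className);
                return true;
            } catch (Throwable t) {
                return false; // ClassNotFoundException or linkage errors both mean "absent"
            }
        }
    }

With both flags required, a node missing either jar simply has no geo_shape parser or mapper rather than one that breaks on first use.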
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
index dead72aee8..d8c142f478 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java
@@ -36,6 +36,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -58,7 +59,6 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.plugins.PluginsService;
import java.io.IOException;
@@ -100,9 +100,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
@Inject
public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv,
- NodeSettingsService nodeSettingsService, AnalysisRegistry analysisRegistry,
- IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
- ClusterService clusterService, MapperRegistry mapperRegistry) {
+ ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry,
+ IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
+ ClusterService clusterService, MapperRegistry mapperRegistry) {
super(settings);
this.pluginsService = pluginsService;
this.nodeEnv = nodeEnv;
@@ -113,7 +113,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.mapperRegistry = mapperRegistry;
- nodeSettingsService.addListener(indexStoreConfig);
+ clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
+ clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
+
}
@Override
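The constructor change shows the new dynamic-settings wiring: instead of a NodeSettingsService listener that re-parses raw Settings on every refresh, each dynamic setting is registered once with a typed consumer. A simplified consumer-side sketch using the same IndexStoreConfig setting referenced above (ExampleThrottle is a stand-in for a real component; the setting is assumed to be a Setting<ByteSizeValue>):

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeValue;
    import org.elasticsearch.index.store.IndexStoreConfig;

    class ExampleThrottle {
        private volatile ByteSizeValue maxBytesPerSec;

        ExampleThrottle(Settings settings, ClusterSettings clusterSettings) {
            // initial value, with the setting's default applied if unset
            this.maxBytesPerSec = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.get(settings);
            // the consumer is invoked with the already-parsed value on every dynamic update
            clusterSettings.addSettingsUpdateConsumer(
                    IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
                    this::setMaxBytesPerSec);
        }

        private void setMaxBytesPerSec(ByteSizeValue value) {
            this.maxBytesPerSec = value;
        }
    }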
diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
index 33f3c127d6..0e1532bc6b 100644
--- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
+++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
@@ -25,9 +25,10 @@ import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.ArrayList;
import java.util.List;
@@ -45,25 +46,17 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
private final ConcurrentMap<String, CircuitBreaker> breakers = new ConcurrentHashMap();
- // Old pre-1.4.0 backwards compatible settings
- public static final String OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING = "indices.fielddata.breaker.limit";
- public static final String OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.fielddata.breaker.overhead";
+ public static final Setting<ByteSizeValue> TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER);
- public static final String TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.total.limit";
- public static final String DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT = "70%";
+ public static final Setting<ByteSizeValue> FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER);
+ public static final Setting<Double> FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER);
+ public static final Setting<CircuitBreaker.Type> FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER);
- public static final String FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.fielddata.limit";
- public static final String FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.fielddata.overhead";
- public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type";
- public static final String DEFAULT_FIELDDATA_BREAKER_LIMIT = "60%";
- public static final double DEFAULT_FIELDDATA_OVERHEAD_CONSTANT = 1.03;
+ public static final Setting<ByteSizeValue> REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER);
+ public static final Setting<Double> REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER);
+ public static final Setting<CircuitBreaker.Type> REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER);
- public static final String REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.request.limit";
- public static final String REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.request.overhead";
- public static final String REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type";
- public static final String DEFAULT_REQUEST_BREAKER_LIMIT = "40%";
- public static final String DEFAULT_BREAKER_TYPE = "memory";
private volatile BreakerSettings parentSettings;
private volatile BreakerSettings fielddataSettings;
@@ -73,41 +66,21 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
private final AtomicLong parentTripCount = new AtomicLong(0);
@Inject
- public HierarchyCircuitBreakerService(Settings settings, NodeSettingsService nodeSettingsService) {
+ public HierarchyCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) {
super(settings);
-
- // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING
- // setting to keep backwards compatibility with 1.3, it can be safely
- // removed when compatibility with 1.3 is no longer needed
- String compatibilityFielddataLimitDefault = DEFAULT_FIELDDATA_BREAKER_LIMIT;
- ByteSizeValue compatibilityFielddataLimit = settings.getAsMemory(OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING, null);
- if (compatibilityFielddataLimit != null) {
- compatibilityFielddataLimitDefault = compatibilityFielddataLimit.toString();
- }
-
- // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING
- // setting to keep backwards compatibility with 1.3, it can be safely
- // removed when compatibility with 1.3 is no longer needed
- double compatibilityFielddataOverheadDefault = DEFAULT_FIELDDATA_OVERHEAD_CONSTANT;
- Double compatibilityFielddataOverhead = settings.getAsDouble(OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
- if (compatibilityFielddataOverhead != null) {
- compatibilityFielddataOverheadDefault = compatibilityFielddataOverhead;
- }
-
this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA,
- settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, compatibilityFielddataLimitDefault).bytes(),
- settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, compatibilityFielddataOverheadDefault),
- CircuitBreaker.Type.parseValue(settings.get(FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE))
+ FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
+ FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
+ FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
);
this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST,
- settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_REQUEST_BREAKER_LIMIT).bytes(),
- settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0),
- CircuitBreaker.Type.parseValue(settings.get(REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE))
+ REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(),
+ REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
+ REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
);
- this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT,
- settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT).bytes(), 1.0, CircuitBreaker.Type.PARENT);
+ this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), 1.0, CircuitBreaker.Type.PARENT);
if (logger.isTraceEnabled()) {
logger.trace("parent circuit breaker with settings {}", this.parentSettings);
}
@@ -115,52 +88,38 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
registerBreaker(this.requestSettings);
registerBreaker(this.fielddataSettings);
- nodeSettingsService.addListener(new ApplySettings());
+ clusterSettings.addSettingsUpdateConsumer(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, this::setTotalCircuitBreakerLimit, this::validateTotalCircuitBreakerLimit);
+ clusterSettings.addSettingsUpdateConsumer(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setFieldDataBreakerLimit);
+ clusterSettings.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setRequestBreakerLimit);
+ }
+ private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) {
+ BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.bytes(), newRequestOverhead,
+ HierarchyCircuitBreakerService.this.requestSettings.getType());
+ registerBreaker(newRequestSettings);
+ HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings;
+ logger.info("Updated breaker settings request: {}", newRequestSettings);
}
- public class ApplySettings implements NodeSettingsService.Listener {
-
- @Override
- public void onRefreshSettings(Settings settings) {
-
- // Fielddata settings
- ByteSizeValue newFielddataMax = settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, null);
- Double newFielddataOverhead = settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
- if (newFielddataMax != null || newFielddataOverhead != null) {
- long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes();
- newFielddataOverhead = newFielddataOverhead == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead;
+ private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newFielddataOverhead) {
+ long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes();
+ newFielddataOverhead = newFielddataOverhead == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead;
+ BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead,
+ HierarchyCircuitBreakerService.this.fielddataSettings.getType());
+ registerBreaker(newFielddataSettings);
+ HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings;
+ logger.info("Updated breaker settings field data: {}", newFielddataSettings);
- BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead,
- HierarchyCircuitBreakerService.this.fielddataSettings.getType());
- registerBreaker(newFielddataSettings);
- HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings;
- logger.info("Updated breaker settings fielddata: {}", newFielddataSettings);
- }
+ }
- // Request settings
- ByteSizeValue newRequestMax = settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, null);
- Double newRequestOverhead = settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
- if (newRequestMax != null || newRequestOverhead != null) {
- long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes();
- newRequestOverhead = newRequestOverhead == null ? HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead;
-
- BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead,
- HierarchyCircuitBreakerService.this.requestSettings.getType());
- registerBreaker(newRequestSettings);
- HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings;
- logger.info("Updated breaker settings request: {}", newRequestSettings);
- }
+ private boolean validateTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) {
+ BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT);
+ validateSettings(new BreakerSettings[]{newParentSettings});
+ return true;
+ }
- // Parent settings
- long oldParentMax = HierarchyCircuitBreakerService.this.parentSettings.getLimit();
- ByteSizeValue newParentMax = settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, null);
- if (newParentMax != null && (newParentMax.bytes() != oldParentMax)) {
- BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, newParentMax.bytes(), 1.0, CircuitBreaker.Type.PARENT);
- validateSettings(new BreakerSettings[]{newParentSettings});
- HierarchyCircuitBreakerService.this.parentSettings = newParentSettings;
- logger.info("Updated breaker settings parent: {}", newParentSettings);
- }
- }
+ private void setTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) {
+ BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT);
+ this.parentSettings = newParentSettings;
}
/**
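The breaker refactoring leans on two features of the new Setting constants: one typed constant bundles key, default, dynamic flag and scope, and addSettingsUpdateConsumer accepts either a validator (as for the parent breaker) or a pair of settings whose updates are delivered together (limit plus overhead). A sketch of the validator form under the signatures used in this hunk, with a made-up key:

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeValue;

    // Sketch: a dynamic, cluster-scoped limit with a pre-apply validator,
    // mirroring TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING above. The key is made up.
    class ExampleBreakerConfig {
        static final Setting<ByteSizeValue> EXAMPLE_LIMIT_SETTING =
                Setting.byteSizeSetting("indices.breaker.example.limit", "50%", true, Setting.Scope.CLUSTER);

        private volatile ByteSizeValue limit;

        ExampleBreakerConfig(Settings settings, ClusterSettings clusterSettings) {
            this.limit = EXAMPLE_LIMIT_SETTING.get(settings); // default applied if unset
            clusterSettings.addSettingsUpdateConsumer(EXAMPLE_LIMIT_SETTING,
                    this::applyLimit,      // runs only after validation succeeded
                    this::validateLimit);  // vetoes bad values before any state changes
        }

        private void applyLimit(ByteSizeValue newLimit) {
            this.limit = newLimit;
        }

        private boolean validateLimit(ByteSizeValue newLimit) {
            return newLimit.bytes() > 0; // or throw, as validateTotalCircuitBreakerLimit does via validateSettings
        }
    }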
diff --git a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java
index f7ae5f94b9..220ce9120e 100644
--- a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java
+++ b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java
@@ -19,8 +19,12 @@
package org.elasticsearch.indices.flush;
import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.index.shard.ShardId;
+import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
@@ -30,15 +34,15 @@ import static java.util.Collections.unmodifiableMap;
/**
* Result for all copies of a shard
*/
-public class ShardsSyncedFlushResult {
+public class ShardsSyncedFlushResult implements Streamable {
private String failureReason;
- private Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses;
+ private Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses;
private String syncId;
private ShardId shardId;
// some shards may be unassigned, so we need this as state
private int totalShards;
- public ShardsSyncedFlushResult() {
+ private ShardsSyncedFlushResult() {
}
public ShardId getShardId() {
@@ -59,7 +63,7 @@ public class ShardsSyncedFlushResult {
/**
* success constructor
*/
- public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses) {
+ public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses) {
this.failureReason = null;
this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses));
this.syncId = syncId;
@@ -98,7 +102,7 @@ public class ShardsSyncedFlushResult {
*/
public int successfulShards() {
int i = 0;
- for (SyncedFlushService.SyncedFlushResponse result : shardResponses.values()) {
+ for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) {
if (result.success()) {
i++;
}
@@ -109,9 +113,9 @@ public class ShardsSyncedFlushResult {
/**
* @return an array of shard failures
*/
- public Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards() {
- Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failures = new HashMap<>();
- for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> result : shardResponses.entrySet()) {
+ public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards() {
+ Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failures = new HashMap<>();
+ for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> result : shardResponses.entrySet()) {
if (result.getValue().success() == false) {
failures.put(result.getKey(), result.getValue());
}
@@ -123,11 +127,45 @@ public class ShardsSyncedFlushResult {
* @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush.
* Empty if synced flush failed before step three.
*/
- public Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses() {
+ public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses() {
return shardResponses;
}
public ShardId shardId() {
return shardId;
}
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ failureReason = in.readOptionalString();
+ int numResponses = in.readInt();
+ shardResponses = new HashMap<>();
+ for (int i = 0; i < numResponses; i++) {
+ ShardRouting shardRouting = ShardRouting.readShardRoutingEntry(in);
+ SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in);
+ shardResponses.put(shardRouting, response);
+ }
+ syncId = in.readOptionalString();
+ shardId = ShardId.readShardId(in);
+ totalShards = in.readInt();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeOptionalString(failureReason);
+ out.writeInt(shardResponses.size());
+ for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> entry : shardResponses.entrySet()) {
+ entry.getKey().writeTo(out);
+ entry.getValue().writeTo(out);
+ }
+ out.writeOptionalString(syncId);
+ shardId.writeTo(out);
+ out.writeInt(totalShards);
+ }
+
+ public static ShardsSyncedFlushResult readShardsSyncedFlushResult(StreamInput in) throws IOException {
+ ShardsSyncedFlushResult shardsSyncedFlushResult = new ShardsSyncedFlushResult();
+ shardsSyncedFlushResult.readFrom(in);
+ return shardsSyncedFlushResult;
+ }
}
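readFrom and writeTo must stay mirror images, field for field and in the same order, and the private no-arg constructor plus static read* factory is the standard Streamable idiom in this codebase. A trimmed sketch of the idiom (SyncMarker and its fields are illustrative):

    import java.io.IOException;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Streamable;

    public class SyncMarker implements Streamable {
        private String syncId;
        private int totalShards;

        private SyncMarker() {} // only reachable through the factory below

        public static SyncMarker readSyncMarker(StreamInput in) throws IOException {
            SyncMarker marker = new SyncMarker();
            marker.readFrom(in);
            return marker;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            syncId = in.readOptionalString(); // order must match writeTo exactly
            totalShards = in.readInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalString(syncId);
            out.writeInt(totalShards);
        }
    }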
diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
index ad264c2ac0..0918ad2afe 100644
--- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
+++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
@@ -21,6 +21,7 @@ package org.elasticsearch.indices.flush;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
@@ -81,9 +82,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
-
- transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler());
- transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, SyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler());
+ transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler());
+ transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler());
transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler());
}
@@ -109,7 +109,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
* a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)}
* for more details.
*/
- public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<IndicesSyncedFlushResult> listener) {
+ public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) {
final ClusterState state = clusterService.state();
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
@@ -123,7 +123,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
if (numberOfShards == 0) {
- listener.onResponse(new IndicesSyncedFlushResult(results));
+ listener.onResponse(new SyncedFlushResponse(results));
return;
}
final int finalTotalNumberOfShards = totalNumberOfShards;
@@ -138,7 +138,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
results.get(index).add(syncedFlushResult);
if (countDown.countDown()) {
- listener.onResponse(new IndicesSyncedFlushResult(results));
+ listener.onResponse(new SyncedFlushResponse(results));
}
}
@@ -147,7 +147,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
logger.debug("{} unexpected error while executing synced flush", shardId);
results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage()));
if (countDown.countDown()) {
- listener.onResponse(new IndicesSyncedFlushResult(results));
+ listener.onResponse(new SyncedFlushResponse(results));
}
}
});
@@ -297,33 +297,33 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds,
final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) {
final CountDown countDown = new CountDown(shards.size());
- final Map<ShardRouting, SyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
+ final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
for (final ShardRouting shard : shards) {
final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
if (node == null) {
logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
- results.put(shard, new SyncedFlushResponse("unknown node"));
+ results.put(shard, new ShardSyncedFlushResponse("unknown node"));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue;
}
final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId());
if (expectedCommitId == null) {
logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
- results.put(shard, new SyncedFlushResponse("no commit id from pre-sync flush"));
+ results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
continue;
}
logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
- transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId),
- new BaseTransportResponseHandler<SyncedFlushResponse>() {
+ transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId),
+ new BaseTransportResponseHandler<ShardSyncedFlushResponse>() {
@Override
- public SyncedFlushResponse newInstance() {
- return new SyncedFlushResponse();
+ public ShardSyncedFlushResponse newInstance() {
+ return new ShardSyncedFlushResponse();
}
@Override
- public void handleResponse(SyncedFlushResponse response) {
- SyncedFlushResponse existing = results.put(shard, response);
+ public void handleResponse(ShardSyncedFlushResponse response) {
+ ShardSyncedFlushResponse existing = results.put(shard, response);
assert existing == null : "got two answers for node [" + node + "]";
// count after the assert so we won't decrement twice in handleException
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
@@ -332,7 +332,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public void handleException(TransportException exp) {
logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard);
- results.put(shard, new SyncedFlushResponse(exp.getMessage()));
+ results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
}
@@ -346,7 +346,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
private void contDownAndSendResponseIfDone(String syncId, List<ShardRouting> shards, ShardId shardId, int totalShards,
- ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, SyncedFlushResponse> results) {
+ ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, ShardSyncedFlushResponse> results) {
if (countDown.countDown()) {
assert results.size() == shards.size();
listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results));
@@ -369,7 +369,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
continue;
}
- transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() {
+ transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() {
@Override
public PreSyncedFlushResponse newInstance() {
return new PreSyncedFlushResponse();
@@ -401,7 +401,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
}
- private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) {
+ private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
logger.trace("{} performing pre sync flush", request.shardId());
@@ -410,7 +410,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
return new PreSyncedFlushResponse(commitId);
}
- private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) {
+ private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().id());
logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId());
@@ -418,11 +418,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
switch (result) {
case SUCCESS:
- return new SyncedFlushResponse();
+ return new ShardSyncedFlushResponse();
case COMMIT_MISMATCH:
- return new SyncedFlushResponse("commit has changed");
+ return new ShardSyncedFlushResponse("commit has changed");
case PENDING_OPERATIONS:
- return new SyncedFlushResponse("pending operations");
+ return new ShardSyncedFlushResponse("pending operations");
default:
throw new ElasticsearchException("unknown synced flush result [" + result + "]");
}
@@ -439,19 +439,19 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
return new InFlightOpsResponse(opCount);
}
- public final static class PreSyncedFlushRequest extends TransportRequest {
+ public final static class PreShardSyncedFlushRequest extends TransportRequest {
private ShardId shardId;
- public PreSyncedFlushRequest() {
+ public PreShardSyncedFlushRequest() {
}
- public PreSyncedFlushRequest(ShardId shardId) {
+ public PreShardSyncedFlushRequest(ShardId shardId) {
this.shardId = shardId;
}
@Override
public String toString() {
- return "PreSyncedFlushRequest{" +
+ return "PreShardSyncedFlushRequest{" +
"shardId=" + shardId +
'}';
}
@@ -504,16 +504,16 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
}
- public static final class SyncedFlushRequest extends TransportRequest {
+ public static final class ShardSyncedFlushRequest extends TransportRequest {
private String syncId;
private Engine.CommitId expectedCommitId;
private ShardId shardId;
- public SyncedFlushRequest() {
+ public ShardSyncedFlushRequest() {
}
- public SyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) {
+ public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) {
this.expectedCommitId = expectedCommitId;
this.shardId = shardId;
this.syncId = syncId;
@@ -549,7 +549,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public String toString() {
- return "SyncedFlushRequest{" +
+ return "ShardSyncedFlushRequest{" +
"shardId=" + shardId +
",syncId='" + syncId + '\'' +
'}';
@@ -559,18 +559,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
/**
* Response for third step of synced flush (writing the sync id) for one shard copy
*/
- public static final class SyncedFlushResponse extends TransportResponse {
+ public static final class ShardSyncedFlushResponse extends TransportResponse {
/**
* a non null value indicates a failure to sync flush. null means success
*/
String failureReason;
- public SyncedFlushResponse() {
+ public ShardSyncedFlushResponse() {
failureReason = null;
}
- public SyncedFlushResponse(String failureReason) {
+ public ShardSyncedFlushResponse(String failureReason) {
this.failureReason = failureReason;
}
@@ -596,11 +596,17 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
@Override
public String toString() {
- return "SyncedFlushResponse{" +
+ return "ShardSyncedFlushResponse{" +
"success=" + success() +
", failureReason='" + failureReason + '\'' +
'}';
}
+
+ public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException {
+ ShardSyncedFlushResponse shardSyncedFlushResponse = new ShardSyncedFlushResponse();
+ shardSyncedFlushResponse.readFrom(in);
+ return shardSyncedFlushResponse;
+ }
}
@@ -677,18 +683,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
}
}
- private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreSyncedFlushRequest> {
+ private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> {
@Override
- public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception {
+ public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(performPreSyncedFlush(request));
}
}
- private final class SyncedFlushTransportHandler implements TransportRequestHandler<SyncedFlushRequest> {
+ private final class SyncedFlushTransportHandler implements TransportRequestHandler<ShardSyncedFlushRequest> {
@Override
- public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception {
+ public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel) throws Exception {
channel.sendResponse(performSyncedFlush(request));
}
}
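After the renames, the "synced flush" name is layered: ShardSyncedFlushRequest/ShardSyncedFlushResponse are the per-shard-copy transport messages, ShardsSyncedFlushResult aggregates the copies of one shard, and the action-level SyncedFlushResponse aggregates per index. A hedged usage sketch of the public entry point (the index pattern is made up):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
    import org.elasticsearch.action.support.IndicesOptions;
    import org.elasticsearch.indices.flush.SyncedFlushService;

    // Sketch: the multi-index entry point now answers with the action-level
    // SyncedFlushResponse instead of the removed IndicesSyncedFlushResult.
    void flushAll(SyncedFlushService syncedFlushService) {
        syncedFlushService.attemptSyncedFlush(
                new String[] { "logs-2015.12.*" },
                IndicesOptions.lenientExpandOpen(),
                new ActionListener<SyncedFlushResponse>() {
                    @Override
                    public void onResponse(SyncedFlushResponse response) {
                        // per-index lists of ShardsSyncedFlushResult live inside the response
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        // request-level failure; per-copy failures are reported in the results
                    }
                });
    }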
diff --git a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java
index 0cec415d63..08b7b34e91 100644
--- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java
+++ b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java
@@ -21,6 +21,7 @@ package org.elasticsearch.indices.query;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.EmptyQueryBuilder;
@@ -40,11 +41,12 @@ public class IndicesQueriesRegistry extends AbstractComponent {
public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) {
super(settings);
Map<String, QueryParser<?>> queryParsers = new HashMap<>();
- for (QueryParser<?> queryParser : injectedQueryParsers) {
+ for (@SuppressWarnings("unchecked") QueryParser<? extends QueryBuilder> queryParser : injectedQueryParsers) {
for (String name : queryParser.names()) {
queryParsers.put(name, queryParser);
}
- namedWriteableRegistry.registerPrototype(QueryBuilder.class, queryParser.getBuilderPrototype());
+ @SuppressWarnings("unchecked") NamedWriteable<? extends QueryBuilder> qb = queryParser.getBuilderPrototype();
+ namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb);
}
// EmptyQueryBuilder is not registered as query parser but used internally.
// We need to register it with the NamedWriteableRegistry in order to serialize it
@@ -58,4 +60,4 @@ public class IndicesQueriesRegistry extends AbstractComponent {
public Map<String, QueryParser<?>> queryParsers() {
return queryParsers;
}
-}
\ No newline at end of file
+}
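The loop rewrite narrows the unchecked conversions to the two places they occur, each with its own @SuppressWarnings, instead of annotating the whole method. In isolation the narrowing looks like this (names as in the hunk above):

    // Sketch: confine the unchecked conversion to one annotated local so the
    // rest of the method stays warning-clean.
    @SuppressWarnings("unchecked")
    NamedWriteable<? extends QueryBuilder> prototype = queryParser.getBuilderPrototype();
    namedWriteableRegistry.registerPrototype(QueryBuilder.class, prototype);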
diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
index 6db38d59e8..682b66e084 100644
--- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
+++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java
@@ -23,16 +23,16 @@ import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.store.RateLimiter.SimpleRateLimiter;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
-import java.util.Objects;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -40,34 +40,33 @@ import java.util.concurrent.TimeUnit;
*/
public class RecoverySettings extends AbstractComponent implements Closeable {
- public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
- public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
- public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";
+ public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.CLUSTER);
+ public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.CLUSTER);
+ public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER);
/**
* how long to wait before retrying after issues caused by cluster state syncing between nodes
* i.e., local node is not yet known on remote node, remote shard not yet started etc.
*/
- public static final String INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC = "indices.recovery.retry_delay_state_sync";
+ public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER);
/** how long to wait before retrying after network related issues */
- public static final String INDICES_RECOVERY_RETRY_DELAY_NETWORK = "indices.recovery.retry_delay_network";
-
- /**
- * recoveries that don't show any activity for more then this interval will be failed.
- * defaults to `indices.recovery.internal_action_long_timeout`
- */
- public static final String INDICES_RECOVERY_ACTIVITY_TIMEOUT = "indices.recovery.recovery_activity_timeout";
+ public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER);
/** timeout value to use for requests made as part of the recovery process */
- public static final String INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT = "indices.recovery.internal_action_timeout";
+ public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER);
/**
* timeout value to use for requests made as part of the recovery process that are expected to take long time.
* defaults to twice `indices.recovery.internal_action_timeout`.
*/
- public static final String INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT = "indices.recovery.internal_action_long_timeout";
+ public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER);
+ /**
+ * recoveries that don't show any activity for more than this interval will be failed.
+ * defaults to `indices.recovery.internal_action_long_timeout`
+ */
+ public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER);
public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes();
@@ -89,31 +88,28 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE;
@Inject
- public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
+ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) {
super(settings);
- this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500));
+ this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings);
// doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes)
// and we want to give the master time to remove a faulty node
- this.retryDelayNetwork = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_NETWORK, TimeValue.timeValueSeconds(5));
+ this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings);
- this.internalActionTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, TimeValue.timeValueMinutes(15));
- this.internalActionLongTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, new TimeValue(internalActionTimeout.millis() * 2));
+ this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings);
+ this.internalActionLongTimeout = INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings);
- this.activityTimeout = settings.getAsTime(INDICES_RECOVERY_ACTIVITY_TIMEOUT,
- // default to the internalActionLongTimeout used as timeouts on RecoverySource
- internalActionLongTimeout
- );
+ this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings);
- this.concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, 3);
+ this.concurrentStreams = INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.get(settings);
this.concurrentStreamPool = EsExecutors.newScaling("recovery_stream", 0, concurrentStreams, 60, TimeUnit.SECONDS,
EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
- this.concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 2);
+ this.concurrentSmallFileStreams = INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.get(settings);
this.concurrentSmallFileStreamPool = EsExecutors.newScaling("small_file_recovery_stream", 0, concurrentSmallFileStreams, 60,
TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
- this.maxBytesPerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(40, ByteSizeUnit.MB));
+ this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings);
if (maxBytesPerSec.bytes() <= 0) {
rateLimiter = null;
} else {
@@ -123,7 +119,14 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]",
maxBytesPerSec, concurrentStreams);
- nodeSettingsService.addListener(new ApplySettings());
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, this::setConcurrentStreams);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, this::setConcurrentSmallFileStreams);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout);
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout);
}
@Override
@@ -173,51 +176,44 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
this.chunkSize = chunkSize;
}
+ private void setConcurrentStreams(int concurrentStreams) {
+ this.concurrentStreams = concurrentStreams;
+ concurrentStreamPool.setMaximumPoolSize(concurrentStreams);
+ }
+
+ public void setRetryDelayStateSync(TimeValue retryDelayStateSync) {
+ this.retryDelayStateSync = retryDelayStateSync;
+ }
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec);
- if (!Objects.equals(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) {
- logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec);
- RecoverySettings.this.maxBytesPerSec = maxSizePerSec;
- if (maxSizePerSec.bytes() <= 0) {
- rateLimiter = null;
- } else if (rateLimiter != null) {
- rateLimiter.setMBPerSec(maxSizePerSec.mbFrac());
- } else {
- rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac());
- }
- }
-
- int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
- if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
- logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);
- RecoverySettings.this.concurrentStreams = concurrentStreams;
- RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams);
- }
-
- int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams);
- if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) {
- logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams);
- RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams;
- RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams);
- }
-
- RecoverySettings.this.retryDelayNetwork = maybeUpdate(RecoverySettings.this.retryDelayNetwork, settings, INDICES_RECOVERY_RETRY_DELAY_NETWORK);
- RecoverySettings.this.retryDelayStateSync = maybeUpdate(RecoverySettings.this.retryDelayStateSync, settings, INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC);
- RecoverySettings.this.activityTimeout = maybeUpdate(RecoverySettings.this.activityTimeout, settings, INDICES_RECOVERY_ACTIVITY_TIMEOUT);
- RecoverySettings.this.internalActionTimeout = maybeUpdate(RecoverySettings.this.internalActionTimeout, settings, INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT);
- RecoverySettings.this.internalActionLongTimeout = maybeUpdate(RecoverySettings.this.internalActionLongTimeout, settings, INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT);
- }
+ public void setRetryDelayNetwork(TimeValue retryDelayNetwork) {
+ this.retryDelayNetwork = retryDelayNetwork;
+ }
+
+ public void setActivityTimeout(TimeValue activityTimeout) {
+ this.activityTimeout = activityTimeout;
+ }
+
+ public void setInternalActionTimeout(TimeValue internalActionTimeout) {
+ this.internalActionTimeout = internalActionTimeout;
+ }
+
+ public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) {
+ this.internalActionLongTimeout = internalActionLongTimeout;
+ }
- private TimeValue maybeUpdate(final TimeValue currentValue, final Settings settings, final String key) {
- final TimeValue value = settings.getAsTime(key, currentValue);
- if (value.equals(currentValue)) {
- return currentValue;
- }
- logger.info("updating [] from [{}] to [{}]", key, currentValue, value);
- return value;
+ private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) {
+ this.maxBytesPerSec = maxBytesPerSec;
+ if (maxBytesPerSec.bytes() <= 0) {
+ rateLimiter = null;
+ } else if (rateLimiter != null) {
+ rateLimiter.setMBPerSec(maxBytesPerSec.mbFrac());
+ } else {
+ rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
}
}
+
+ private void setConcurrentSmallFileStreams(int concurrentSmallFileStreams) {
+ this.concurrentSmallFileStreams = concurrentSmallFileStreams;
+ concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams);
+ }
}
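Most of these conversions are mechanical, but the two timeouts with derived defaults are worth noting: internal_action_long_timeout defaults to twice internal_action_timeout, and recovery_activity_timeout falls back to the long timeout, both expressed as default functions over the live Settings. A sketch of the derived-default form with made-up keys:

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.unit.TimeValue;

    // Sketch: a timeout whose default is derived from another setting, following
    // INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING above.
    class ExampleTimeouts {
        static final Setting<TimeValue> BASE_TIMEOUT_SETTING =
                Setting.positiveTimeSetting("example.base_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER);

        // the default function is re-evaluated against the live Settings, so the
        // derived value tracks example.base_timeout unless set explicitly
        static final Setting<TimeValue> LONG_TIMEOUT_SETTING = Setting.timeSetting(
                "example.long_timeout",
                (s) -> TimeValue.timeValueMillis(BASE_TIMEOUT_SETTING.get(s).millis() * 2).toString(),
                TimeValue.timeValueSeconds(0), // lower bound
                true, Setting.Scope.CLUSTER);
    }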
diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
index f095cc355e..0eed82561a 100644
--- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
+++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
@@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.uid.Versions;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -49,7 +51,6 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.io.IOException;
import java.util.ArrayList;
@@ -66,7 +67,7 @@ import java.util.concurrent.locks.ReentrantLock;
*/
public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLService> {
- public static final String INDICES_TTL_INTERVAL = "indices.ttl.interval";
+ public static final Setting<TimeValue> INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge";
private final ClusterService clusterService;
@@ -77,16 +78,15 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
private PurgerThread purgerThread;
@Inject
- public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeSettingsService nodeSettingsService, TransportBulkAction bulkAction) {
+ public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService, ClusterSettings clusterSettings, TransportBulkAction bulkAction) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
- TimeValue interval = this.settings.getAsTime("indices.ttl.interval", TimeValue.timeValueSeconds(60));
+ TimeValue interval = INDICES_TTL_INTERVAL_SETTING.get(settings);
this.bulkAction = bulkAction;
this.bulkSize = this.settings.getAsInt("indices.ttl.bulk_size", 10000);
this.purgerThread = new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval);
-
- nodeSettingsService.addListener(new ApplySettings());
+ clusterSettings.addSettingsUpdateConsumer(INDICES_TTL_INTERVAL_SETTING, this.purgerThread::resetInterval);
}
@Override
@@ -310,20 +310,6 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
return bulkRequest;
}
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- final TimeValue currentInterval = IndicesTTLService.this.purgerThread.getInterval();
- final TimeValue interval = settings.getAsTime(INDICES_TTL_INTERVAL, currentInterval);
- if (!interval.equals(currentInterval)) {
- logger.info("updating indices.ttl.interval from [{}] to [{}]",currentInterval, interval);
- IndicesTTLService.this.purgerThread.resetInterval(interval);
-
- }
- }
- }
-
-
private static final class Notifier {
private final ReentrantLock lock = new ReentrantLock();
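
The same migration recurs throughout this commit: a raw settings key becomes a typed Setting constant, the constructor reads the initial value from it, and a consumer registered on ClusterSettings replaces the old NodeSettingsService.Listener. A hedged sketch of the idiom; MyService and the setting key are hypothetical, while the API calls mirror the hunk above:

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

public class MyService {
    public static final Setting<TimeValue> POLL_INTERVAL_SETTING = Setting.positiveTimeSetting(
            "my.service.poll_interval", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

    private volatile TimeValue pollInterval;

    public MyService(Settings settings, ClusterSettings clusterSettings) {
        this.pollInterval = POLL_INTERVAL_SETTING.get(settings); // initial value
        // invoked with the parsed, validated value whenever it changes dynamically
        clusterSettings.addSettingsUpdateConsumer(POLL_INTERVAL_SETTING, interval -> this.pollInterval = interval);
    }
}
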
diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
index e224c722d4..82a99bd0bd 100644
--- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java
@@ -29,7 +29,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
-import java.lang.management.*;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ManagementPermission;
+import java.lang.management.MemoryMXBean;
+import java.lang.management.MemoryPoolMXBean;
+import java.lang.management.PlatformManagedObject;
+import java.lang.management.RuntimeMXBean;
+import java.lang.reflect.Method;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -101,6 +108,20 @@ public class JvmInfo implements Streamable, ToXContent {
info.memoryPools[i] = memoryPoolMXBean.getName();
}
+ try {
+ @SuppressWarnings("unchecked") Class<? extends PlatformManagedObject> clazz =
+ (Class<? extends PlatformManagedObject>)Class.forName("com.sun.management.HotSpotDiagnosticMXBean");
+ Class<?> vmOptionClazz = Class.forName("com.sun.management.VMOption");
+ PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz);
+ Method vmOptionMethod = clazz.getMethod("getVMOption", String.class);
+ Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops");
+ Method valueMethod = vmOptionClazz.getMethod("getValue");
+ info.useCompressedOops = (String)valueMethod.invoke(useCompressedOopsVmOption);
+ } catch (Throwable t) {
+ // unable to deduce the state of compressed oops
+ info.useCompressedOops = "unknown";
+ }
+
INSTANCE = info;
}
@@ -135,6 +156,8 @@ public class JvmInfo implements Streamable, ToXContent {
String[] gcCollectors = Strings.EMPTY_ARRAY;
String[] memoryPools = Strings.EMPTY_ARRAY;
+ private String useCompressedOops;
+
private JvmInfo() {
}
@@ -258,6 +281,18 @@ public class JvmInfo implements Streamable, ToXContent {
return this.systemProperties;
}
+ /**
+     * The value of the JVM flag UseCompressedOops, if available; otherwise
+ * "unknown". The value "unknown" indicates that an attempt was
+ * made to obtain the value of the flag on this JVM and the attempt
+ * failed.
+ *
+ * @return the value of the JVM flag UseCompressedOops or "unknown"
+ */
+ public String useCompressedOops() {
+ return this.useCompressedOops;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.JVM);
@@ -279,6 +314,8 @@ public class JvmInfo implements Streamable, ToXContent {
builder.field(Fields.GC_COLLECTORS, gcCollectors);
builder.field(Fields.MEMORY_POOLS, memoryPools);
+ builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops);
+
builder.endObject();
return builder;
}
@@ -306,6 +343,7 @@ public class JvmInfo implements Streamable, ToXContent {
static final XContentBuilderString DIRECT_MAX_IN_BYTES = new XContentBuilderString("direct_max_in_bytes");
static final XContentBuilderString GC_COLLECTORS = new XContentBuilderString("gc_collectors");
static final XContentBuilderString MEMORY_POOLS = new XContentBuilderString("memory_pools");
+ static final XContentBuilderString USING_COMPRESSED_OOPS = new XContentBuilderString("using_compressed_ordinary_object_pointers");
}
public static JvmInfo readJvmInfo(StreamInput in) throws IOException {
@@ -337,6 +375,7 @@ public class JvmInfo implements Streamable, ToXContent {
mem.readFrom(in);
gcCollectors = in.readStringArray();
memoryPools = in.readStringArray();
+ useCompressedOops = in.readString();
}
@Override
@@ -361,6 +400,7 @@ public class JvmInfo implements Streamable, ToXContent {
mem.writeTo(out);
out.writeStringArray(gcCollectors);
out.writeStringArray(memoryPools);
+ out.writeString(useCompressedOops);
}
public static class Mem implements Streamable {
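
The com.sun.management types above are looked up reflectively so JvmInfo still loads on JVMs that do not ship them; any failure simply degrades to "unknown". The probe also works standalone; this sketch mirrors the hunk and prints true, false, or unknown:

import java.lang.management.ManagementFactory;
import java.lang.management.PlatformManagedObject;

public class CompressedOopsProbe {
    public static void main(String[] args) {
        String useCompressedOops;
        try {
            @SuppressWarnings("unchecked")
            Class<? extends PlatformManagedObject> clazz = (Class<? extends PlatformManagedObject>)
                    Class.forName("com.sun.management.HotSpotDiagnosticMXBean");
            Class<?> vmOptionClazz = Class.forName("com.sun.management.VMOption");
            PlatformManagedObject bean = ManagementFactory.getPlatformMXBean(clazz);
            Object option = clazz.getMethod("getVMOption", String.class)
                    .invoke(bean, "UseCompressedOops");
            useCompressedOops = (String) vmOptionClazz.getMethod("getValue").invoke(option);
        } catch (Throwable t) {
            // non-HotSpot JVM, or reflective access denied
            useCompressedOops = "unknown";
        }
        System.out.println("UseCompressedOops=" + useCompressedOops);
    }
}
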
diff --git a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java
index 895b3d844f..599755e78a 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java
@@ -17,21 +17,18 @@
* under the License.
*/
-package org.elasticsearch.client.transport;
+package org.elasticsearch.monitor.os;
-import org.elasticsearch.client.support.Headers;
-import org.elasticsearch.client.transport.support.TransportProxyClient;
-import org.elasticsearch.common.inject.AbstractModule;
+public class DummyOsInfo extends OsInfo {
-/**
- *
- */
-public class ClientTransportModule extends AbstractModule {
-
- @Override
- protected void configure() {
- bind(Headers.class).asEagerSingleton();
- bind(TransportProxyClient.class).asEagerSingleton();
- bind(TransportClientNodesService.class).asEagerSingleton();
+ DummyOsInfo() {
+ refreshInterval = 0;
+ availableProcessors = 0;
+ allocatedProcessors = 0;
+ name = "dummy_name";
+ arch = "dummy_arch";
+ version = "dummy_version";
}
+
+ public static final DummyOsInfo INSTANCE = new DummyOsInfo();
}
diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
index f34cd51a14..d94447221c 100644
--- a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
+++ b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java
@@ -108,6 +108,9 @@ public class OsInfo implements Streamable, ToXContent {
refreshInterval = in.readLong();
availableProcessors = in.readInt();
allocatedProcessors = in.readInt();
+ name = in.readOptionalString();
+ arch = in.readOptionalString();
+ version = in.readOptionalString();
}
@Override
@@ -115,5 +118,8 @@ public class OsInfo implements Streamable, ToXContent {
out.writeLong(refreshInterval);
out.writeInt(availableProcessors);
out.writeInt(allocatedProcessors);
+ out.writeOptionalString(name);
+ out.writeOptionalString(arch);
+ out.writeOptionalString(version);
}
}
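
The readFrom/writeTo pair works because the wire format is positional: fields must be read in exactly the order they were written, and the optional variants prepend a presence byte so a null value round-trips safely. A sketch of one symmetric pair; the HostInfo type and its hostname field are hypothetical:

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

public class HostInfo implements Streamable {
    String hostname; // may legitimately be null

    @Override
    public void readFrom(StreamInput in) throws IOException {
        hostname = in.readOptionalString(); // presence byte, then the value if present
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalString(hostname); // must mirror readFrom's order exactly
    }
}
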
diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java
index f4bc34a91e..c964e79587 100644
--- a/core/src/main/java/org/elasticsearch/node/Node.java
+++ b/core/src/main/java/org/elasticsearch/node/Node.java
@@ -23,7 +23,6 @@ import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionModule;
-import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClientModule;
@@ -42,11 +41,15 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
@@ -58,7 +61,7 @@ import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.http.HttpServer;
-import org.elasticsearch.http.HttpServerModule;
+import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;
@@ -73,7 +76,6 @@ import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.percolator.PercolatorModule;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.plugins.Plugin;
@@ -81,7 +83,6 @@ import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.rest.RestController;
-import org.elasticsearch.rest.RestModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
@@ -90,14 +91,21 @@ import org.elasticsearch.snapshots.SnapshotShardsService;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPoolModule;
-import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.tribe.TribeModule;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;
+import java.io.BufferedWriter;
import java.io.IOException;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.nio.charset.Charset;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
@@ -156,7 +164,6 @@ public class Node implements Releasable {
throw new IllegalStateException("Failed to created node environment", ex);
}
final NetworkService networkService = new NetworkService(settings);
- final NodeSettingsService nodeSettingsService = new NodeSettingsService(settings);
final SettingsFilter settingsFilter = new SettingsFilter(settings);
final ThreadPool threadPool = new ThreadPool(settings);
boolean success = false;
@@ -171,20 +178,15 @@ public class Node implements Releasable {
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(this.settings, settingsFilter));
- modules.add(new NodeModule(this, nodeSettingsService, monitorService));
- modules.add(new NetworkModule(networkService));
- modules.add(new ScriptModule(this.settings));
modules.add(new EnvironmentModule(environment));
+ modules.add(new NodeModule(this, monitorService));
+ modules.add(new NetworkModule(networkService, settings, false));
+ modules.add(new ScriptModule(this.settings));
modules.add(new NodeEnvironmentModule(nodeEnvironment));
modules.add(new ClusterNameModule(this.settings));
modules.add(new ThreadPoolModule(threadPool));
modules.add(new DiscoveryModule(this.settings));
modules.add(new ClusterModule(this.settings));
- modules.add(new RestModule(this.settings));
- modules.add(new TransportModule(settings));
- if (settings.getAsBoolean(HTTP_ENABLED, true)) {
- modules.add(new HttpServerModule(settings));
- }
modules.add(new IndicesModule());
modules.add(new SearchModule());
modules.add(new ActionModule(false));
@@ -201,7 +203,7 @@ public class Node implements Releasable {
injector = modules.createInjector();
client = injector.getInstance(Client.class);
- threadPool.setNodeSettingsService(injector.getInstance(NodeSettingsService.class));
+ threadPool.setClusterSettings(injector.getInstance(ClusterSettings.class));
success = true;
} catch (IOException ex) {
throw new ElasticsearchException("failed to bind service", ex);
@@ -274,6 +276,15 @@ public class Node implements Releasable {
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(TribeService.class).start();
+ if (System.getProperty("es.tests.portsfile", "false").equals("true")) {
+ if (settings.getAsBoolean("http.enabled", true)) {
+ HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
+ writePortsFile("http", http.boundAddress());
+ }
+ TransportService transport = injector.getInstance(TransportService.class);
+ writePortsFile("transport", transport.boundAddress());
+ }
+
logger.info("started");
return this;
@@ -425,4 +436,27 @@ public class Node implements Releasable {
public Injector injector() {
return this.injector;
}
+
+ /** Writes a file to the logs dir containing the ports for the given transport type */
+ private void writePortsFile(String type, BoundTransportAddress boundAddress) {
+ Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
+ try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
+ for (TransportAddress address : boundAddress.boundAddresses()) {
+ InetAddress inetAddress = InetAddress.getByName(address.getAddress());
+ if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) {
+                    // skip link-local addresses, they just cause problems
+ continue;
+ }
+ writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n");
+ }
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to write ports file", e);
+ }
+ Path portsFile = environment.logsFile().resolve(type + ".ports");
+ try {
+ Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE);
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to rename ports file", e);
+ }
+ }
}
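
writePortsFile relies on the write-to-temp-then-atomically-rename idiom, so a reader polling for the ports file never observes a partially written one. A standalone sketch of that idiom in plain JDK terms; AtomicPortsFile is hypothetical, and ATOMIC_MOVE throws AtomicMoveNotSupportedException on filesystems that cannot rename atomically:

import java.io.BufferedWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class AtomicPortsFile {
    static void writeLines(Path dir, String name, Iterable<String> lines) throws IOException {
        Path tmp = dir.resolve(name + ".tmp");
        try (BufferedWriter writer = Files.newBufferedWriter(tmp, StandardCharsets.UTF_8)) {
            for (String line : lines) {
                writer.write(line);
                writer.newLine();
            }
        } // writer flushed and closed before the rename below
        Files.move(tmp, dir.resolve(name), StandardCopyOption.ATOMIC_MOVE);
    }
}
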
diff --git a/core/src/main/java/org/elasticsearch/node/NodeModule.java b/core/src/main/java/org/elasticsearch/node/NodeModule.java
index 3641c32503..aa52d38934 100644
--- a/core/src/main/java/org/elasticsearch/node/NodeModule.java
+++ b/core/src/main/java/org/elasticsearch/node/NodeModule.java
@@ -23,9 +23,7 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.monitor.MonitorService;
-import org.elasticsearch.node.Node;
import org.elasticsearch.node.service.NodeService;
-import org.elasticsearch.node.settings.NodeSettingsService;
/**
*
@@ -33,16 +31,14 @@ import org.elasticsearch.node.settings.NodeSettingsService;
public class NodeModule extends AbstractModule {
private final Node node;
- private final NodeSettingsService nodeSettingsService;
private final MonitorService monitorService;
// pkg private so tests can mock
Class<? extends PageCacheRecycler> pageCacheRecyclerImpl = PageCacheRecycler.class;
Class<? extends BigArrays> bigArraysImpl = BigArrays.class;
- public NodeModule(Node node, NodeSettingsService nodeSettingsService, MonitorService monitorService) {
+ public NodeModule(Node node, MonitorService monitorService) {
this.node = node;
- this.nodeSettingsService = nodeSettingsService;
this.monitorService = monitorService;
}
@@ -60,7 +56,6 @@ public class NodeModule extends AbstractModule {
}
bind(Node.class).toInstance(node);
- bind(NodeSettingsService.class).toInstance(nodeSettingsService);
bind(MonitorService.class).toInstance(monitorService);
bind(NodeService.class).asEagerSingleton();
}
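
With NodeSettingsService gone, the module shrinks to the usual Guice shape: objects constructed up front are bound with toInstance, services the injector should build are bound asEagerSingleton. A sketch of that shape; ExampleModule and ExampleService are hypothetical:

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.monitor.MonitorService;

public class ExampleModule extends AbstractModule {
    static class ExampleService {} // stand-in for an injector-constructed service

    private final MonitorService monitorService;

    public ExampleModule(MonitorService monitorService) {
        this.monitorService = monitorService;
    }

    @Override
    protected void configure() {
        bind(MonitorService.class).toInstance(monitorService); // pre-built singleton
        bind(ExampleService.class).asEagerSingleton();         // built by the injector
    }
}
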
diff --git a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java b/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java
deleted file mode 100644
index dbe6a33172..0000000000
--- a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.node.settings;
-
-import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.elasticsearch.common.settings.Settings;
-
-import java.util.Map;
-import java.util.concurrent.CopyOnWriteArrayList;
-
-/**
- * A service that allows to register for node settings change that can come from cluster
- * events holding new settings.
- */
-public class NodeSettingsService extends AbstractComponent implements ClusterStateListener {
-
- private static volatile Settings globalSettings = Settings.Builder.EMPTY_SETTINGS;
-
- /**
- * Returns the global (static) settings last updated by a node. Note, if you have multiple
- * nodes on the same JVM, it will just return the latest one set...
- */
- public static Settings getGlobalSettings() {
- return globalSettings;
- }
-
- private volatile Settings lastSettingsApplied;
-
- private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<>();
-
- @Inject
- public NodeSettingsService(Settings settings) {
- super(settings);
- globalSettings = settings;
- }
-
- // inject it as a member, so we won't get into possible cyclic problems
- public void setClusterService(ClusterService clusterService) {
- clusterService.add(this);
- }
-
- @Override
- public void clusterChanged(ClusterChangedEvent event) {
- // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
- if (event.state().blocks().disableStatePersistence()) {
- return;
- }
-
- if (!event.metaDataChanged()) {
- // nothing changed in the metadata, no need to check
- return;
- }
-
- if (lastSettingsApplied != null && event.state().metaData().settings().equals(lastSettingsApplied)) {
- // nothing changed in the settings, ignore
- return;
- }
-
- for (Listener listener : listeners) {
- try {
- listener.onRefreshSettings(event.state().metaData().settings());
- } catch (Exception e) {
- logger.warn("failed to refresh settings for [{}]", e, listener);
- }
- }
-
- try {
- for (Map.Entry<String, String> entry : event.state().metaData().settings().getAsMap().entrySet()) {
- if (entry.getKey().startsWith("logger.")) {
- String component = entry.getKey().substring("logger.".length());
- if ("_root".equals(component)) {
- ESLoggerFactory.getRootLogger().setLevel(entry.getValue());
- } else {
- ESLoggerFactory.getLogger(component).setLevel(entry.getValue());
- }
- }
- }
- } catch (Exception e) {
- logger.warn("failed to refresh settings for [{}]", e, "logger");
- }
-
- lastSettingsApplied = event.state().metaData().settings();
- globalSettings = lastSettingsApplied;
- }
-
- /**
- * Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically.
- */
- public void addListener(Listener listener) {
- this.listeners.add(listener);
- }
-
- public void removeListener(Listener listener) {
- this.listeners.remove(listener);
- }
-
- public interface Listener {
- void onRefreshSettings(Settings settings);
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java
index 70abaaaff3..8df956f2ce 100644
--- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java
+++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java
@@ -38,12 +38,11 @@ import org.elasticsearch.common.HasHeaders;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -74,6 +73,8 @@ import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -139,7 +140,7 @@ public class PercolateContext extends SearchContext {
this.bigArrays = bigArrays.withCircuitBreaking();
this.querySearchResult = new QuerySearchResult(0, searchShardTarget);
this.engineSearcher = indexShard.acquireSearcher("percolate");
- this.searcher = new ContextIndexSearcher(this, engineSearcher);
+ this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
this.scriptService = scriptService;
this.numberOfShards = request.getNumberOfShards();
this.aliasFilter = aliasFilter;
@@ -164,7 +165,7 @@ public class PercolateContext extends SearchContext {
fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList()));
}
hitContext().reset(
- new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields),
+ new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields),
atomicReaderContext, 0, docSearcher.searcher()
);
}
@@ -748,5 +749,7 @@ public class PercolateContext extends SearchContext {
}
@Override
- public QueryCache getQueryCache() { return indexService.cache().query();}
+ public Profilers getProfilers() {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java
index fa7b47766a..eb33f3832b 100644
--- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java
+++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java
@@ -52,8 +52,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.BytesText;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -533,10 +531,10 @@ public class PercolatorService extends AbstractComponent {
List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize);
outer:
for (PercolateShardResponse response : shardResults) {
- Text index = new StringText(response.getIndex());
+ Text index = new Text(response.getIndex());
for (int i = 0; i < response.matches().length; i++) {
float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];
- Text match = new BytesText(new BytesArray(response.matches()[i]));
+ Text match = new Text(new BytesArray(response.matches()[i]));
Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i);
finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
if (requestedSize != 0 && finalMatches.size() == requestedSize) {
@@ -686,10 +684,10 @@ public class PercolatorService extends AbstractComponent {
List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize);
if (nonEmptyResponses == 1) {
PercolateShardResponse response = shardResults.get(firstNonEmptyIndex);
- Text index = new StringText(response.getIndex());
+ Text index = new Text(response.getIndex());
for (int i = 0; i < response.matches().length; i++) {
float score = response.scores().length == 0 ? Float.NaN : response.scores()[i];
- Text match = new BytesText(new BytesArray(response.matches()[i]));
+ Text match = new Text(new BytesArray(response.matches()[i]));
if (!response.hls().isEmpty()) {
Map<String, HighlightField> hl = response.hls().get(i);
finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
@@ -728,8 +726,8 @@ public class PercolatorService extends AbstractComponent {
slots[requestIndex]++;
PercolateShardResponse shardResponse = shardResults.get(requestIndex);
- Text index = new StringText(shardResponse.getIndex());
- Text match = new BytesText(new BytesArray(shardResponse.matches()[itemIndex]));
+ Text index = new Text(shardResponse.getIndex());
+ Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex]));
float score = shardResponse.scores()[itemIndex];
if (!shardResponse.hls().isEmpty()) {
Map<String, HighlightField> hl = shardResponse.hls().get(itemIndex);
diff --git a/core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java
index b537c448be..a57a96c631 100644
--- a/core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java
+++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java
@@ -16,23 +16,13 @@
* specific language governing permissions and limitations
* under the License.
*/
+package org.elasticsearch.plugins;
-package org.elasticsearch.cluster.settings;
+public class DummyPluginInfo extends PluginInfo {
-import org.elasticsearch.common.inject.BindingAnnotation;
+ private DummyPluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) {
+ super(name, description, site, version, jvm, classname, isolated);
+ }
-import java.lang.annotation.Documented;
-import java.lang.annotation.Retention;
-import java.lang.annotation.Target;
-
-import static java.lang.annotation.ElementType.FIELD;
-import static java.lang.annotation.ElementType.PARAMETER;
-import static java.lang.annotation.RetentionPolicy.RUNTIME;
-
-
-@BindingAnnotation
-@Target({FIELD, PARAMETER})
-@Retention(RUNTIME)
-@Documented
-public @interface ClusterDynamicSettings {
-}
\ No newline at end of file
+ public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", true, "dummy_plugin_version", true, "DummyPluginName", true);
+}
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java
index 6600bf7035..1ebe7813d3 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java
+++ b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java
@@ -22,6 +22,7 @@ package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.*;
import org.elasticsearch.bootstrap.JarHell;
+import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
@@ -66,7 +67,7 @@ public class PluginManager {
"plugin",
"plugin.bat",
"service.bat"));
-
+
static final Set<String> MODULES = unmodifiableSet(newHashSet(
"lang-expression",
"lang-groovy"));
@@ -89,6 +90,7 @@ public class PluginManager {
"mapper-murmur3",
"mapper-size",
"repository-azure",
+ "repository-hdfs",
"repository-s3",
"store-smb"));
@@ -124,7 +126,7 @@ public class PluginManager {
checkForForbiddenName(pluginHandle.name);
} else {
// if we have no name but url, use temporary name that will be overwritten later
- pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null);
+ pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null);
}
Path pluginFile = download(pluginHandle, terminal);
@@ -224,7 +226,7 @@ public class PluginManager {
PluginInfo info = PluginInfo.readFromProperties(root);
terminal.println(VERBOSE, "%s", info);
- // don't let luser install plugin as a module...
+ // don't let luser install plugin as a module...
// they might be unavoidably in maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
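
The Random-to-Randomness swap routes all random number generation through one source that the test framework can seed, making plugin-manager runs reproducible under tests. A small sketch of the calling pattern; the TempNames helper is hypothetical:

import java.util.Random;

import org.elasticsearch.common.Randomness;

class TempNames {
    // Randomness.get() returns a Random derived from the test seed when
    // one is set, so generated names are reproducible in test runs.
    static String next() {
        Random random = Randomness.get();
        return "temp_name" + random.nextInt();
    }
}
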
diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java
deleted file mode 100644
index f0e4d10d7c..0000000000
--- a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.rest.action;
-
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.multibindings.Multibinder;
-import org.elasticsearch.rest.BaseRestHandler;
-import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
-import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
-import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
-import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
-import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
-import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
-import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
-import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction;
-import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
-import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
-import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
-import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
-import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
-import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
-import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
-import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
-import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
-import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
-import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
-import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
-import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
-import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
-import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
-import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction;
-import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
-import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
-import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
-import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
-import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
-import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
-import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
-import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
-import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
-import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
-import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
-import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
-import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
-import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
-import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
-import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
-import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
-import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
-import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
-import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
-import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
-import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
-import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction;
-import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
-import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
-import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
-import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
-import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
-import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
-import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
-import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
-import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
-import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
-import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
-import org.elasticsearch.rest.action.bulk.RestBulkAction;
-import org.elasticsearch.rest.action.cat.AbstractCatAction;
-import org.elasticsearch.rest.action.cat.RestAliasAction;
-import org.elasticsearch.rest.action.cat.RestAllocationAction;
-import org.elasticsearch.rest.action.cat.RestCatAction;
-import org.elasticsearch.rest.action.cat.RestFielddataAction;
-import org.elasticsearch.rest.action.cat.RestHealthAction;
-import org.elasticsearch.rest.action.cat.RestIndicesAction;
-import org.elasticsearch.rest.action.cat.RestMasterAction;
-import org.elasticsearch.rest.action.cat.RestNodeAttrsAction;
-import org.elasticsearch.rest.action.cat.RestNodesAction;
-import org.elasticsearch.rest.action.cat.RestPluginsAction;
-import org.elasticsearch.rest.action.cat.RestRepositoriesAction;
-import org.elasticsearch.rest.action.cat.RestSegmentsAction;
-import org.elasticsearch.rest.action.cat.RestShardsAction;
-import org.elasticsearch.rest.action.cat.RestSnapshotAction;
-import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
-import org.elasticsearch.rest.action.delete.RestDeleteAction;
-import org.elasticsearch.rest.action.explain.RestExplainAction;
-import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction;
-import org.elasticsearch.rest.action.get.RestGetAction;
-import org.elasticsearch.rest.action.get.RestGetSourceAction;
-import org.elasticsearch.rest.action.get.RestHeadAction;
-import org.elasticsearch.rest.action.get.RestMultiGetAction;
-import org.elasticsearch.rest.action.index.RestIndexAction;
-import org.elasticsearch.rest.action.main.RestMainAction;
-import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
-import org.elasticsearch.rest.action.percolate.RestPercolateAction;
-import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction;
-import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction;
-import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction;
-import org.elasticsearch.rest.action.search.RestClearScrollAction;
-import org.elasticsearch.rest.action.search.RestMultiSearchAction;
-import org.elasticsearch.rest.action.search.RestSearchAction;
-import org.elasticsearch.rest.action.search.RestSearchScrollAction;
-import org.elasticsearch.rest.action.suggest.RestSuggestAction;
-import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction;
-import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction;
-import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction;
-import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
-import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
-import org.elasticsearch.rest.action.update.RestUpdateAction;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- *
- */
-public class RestActionModule extends AbstractModule {
- private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>();
-
- public RestActionModule(List<Class<? extends BaseRestHandler>> restPluginsActions) {
- this.restPluginsActions = restPluginsActions;
- }
-
- @Override
- protected void configure() {
- for (Class<? extends BaseRestHandler> restAction : restPluginsActions) {
- bind(restAction).asEagerSingleton();
- }
-
- bind(RestMainAction.class).asEagerSingleton();
-
- bind(RestNodesInfoAction.class).asEagerSingleton();
- bind(RestNodesStatsAction.class).asEagerSingleton();
- bind(RestNodesHotThreadsAction.class).asEagerSingleton();
- bind(RestClusterStatsAction.class).asEagerSingleton();
- bind(RestClusterStateAction.class).asEagerSingleton();
- bind(RestClusterHealthAction.class).asEagerSingleton();
- bind(RestClusterUpdateSettingsAction.class).asEagerSingleton();
- bind(RestClusterGetSettingsAction.class).asEagerSingleton();
- bind(RestClusterRerouteAction.class).asEagerSingleton();
- bind(RestClusterSearchShardsAction.class).asEagerSingleton();
- bind(RestPendingClusterTasksAction.class).asEagerSingleton();
- bind(RestPutRepositoryAction.class).asEagerSingleton();
- bind(RestGetRepositoriesAction.class).asEagerSingleton();
- bind(RestDeleteRepositoryAction.class).asEagerSingleton();
- bind(RestVerifyRepositoryAction.class).asEagerSingleton();
- bind(RestGetSnapshotsAction.class).asEagerSingleton();
- bind(RestCreateSnapshotAction.class).asEagerSingleton();
- bind(RestRestoreSnapshotAction.class).asEagerSingleton();
- bind(RestDeleteSnapshotAction.class).asEagerSingleton();
- bind(RestSnapshotsStatusAction.class).asEagerSingleton();
-
- bind(RestIndicesExistsAction.class).asEagerSingleton();
- bind(RestTypesExistsAction.class).asEagerSingleton();
- bind(RestGetIndicesAction.class).asEagerSingleton();
- bind(RestIndicesStatsAction.class).asEagerSingleton();
- bind(RestIndicesSegmentsAction.class).asEagerSingleton();
- bind(RestIndicesShardStoresAction.class).asEagerSingleton();
- bind(RestGetAliasesAction.class).asEagerSingleton();
- bind(RestAliasesExistAction.class).asEagerSingleton();
- bind(RestIndexDeleteAliasesAction.class).asEagerSingleton();
- bind(RestIndexPutAliasAction.class).asEagerSingleton();
- bind(RestIndicesAliasesAction.class).asEagerSingleton();
- bind(RestGetIndicesAliasesAction.class).asEagerSingleton();
- bind(RestCreateIndexAction.class).asEagerSingleton();
- bind(RestDeleteIndexAction.class).asEagerSingleton();
- bind(RestCloseIndexAction.class).asEagerSingleton();
- bind(RestOpenIndexAction.class).asEagerSingleton();
-
- bind(RestUpdateSettingsAction.class).asEagerSingleton();
- bind(RestGetSettingsAction.class).asEagerSingleton();
-
- bind(RestAnalyzeAction.class).asEagerSingleton();
- bind(RestGetIndexTemplateAction.class).asEagerSingleton();
- bind(RestPutIndexTemplateAction.class).asEagerSingleton();
- bind(RestDeleteIndexTemplateAction.class).asEagerSingleton();
- bind(RestHeadIndexTemplateAction.class).asEagerSingleton();
-
- bind(RestPutWarmerAction.class).asEagerSingleton();
- bind(RestDeleteWarmerAction.class).asEagerSingleton();
- bind(RestGetWarmerAction.class).asEagerSingleton();
-
- bind(RestPutMappingAction.class).asEagerSingleton();
- bind(RestGetMappingAction.class).asEagerSingleton();
- bind(RestGetFieldMappingAction.class).asEagerSingleton();
-
- bind(RestRefreshAction.class).asEagerSingleton();
- bind(RestFlushAction.class).asEagerSingleton();
- bind(RestSyncedFlushAction.class).asEagerSingleton();
- bind(RestForceMergeAction.class).asEagerSingleton();
- bind(RestUpgradeAction.class).asEagerSingleton();
- bind(RestClearIndicesCacheAction.class).asEagerSingleton();
-
- bind(RestIndexAction.class).asEagerSingleton();
- bind(RestGetAction.class).asEagerSingleton();
- bind(RestGetSourceAction.class).asEagerSingleton();
- bind(RestHeadAction.class).asEagerSingleton();
- bind(RestMultiGetAction.class).asEagerSingleton();
- bind(RestDeleteAction.class).asEagerSingleton();
- bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton();
- bind(RestSuggestAction.class).asEagerSingleton();
- bind(RestTermVectorsAction.class).asEagerSingleton();
- bind(RestMultiTermVectorsAction.class).asEagerSingleton();
- bind(RestBulkAction.class).asEagerSingleton();
- bind(RestUpdateAction.class).asEagerSingleton();
- bind(RestPercolateAction.class).asEagerSingleton();
- bind(RestMultiPercolateAction.class).asEagerSingleton();
-
- bind(RestSearchAction.class).asEagerSingleton();
- bind(RestSearchScrollAction.class).asEagerSingleton();
- bind(RestClearScrollAction.class).asEagerSingleton();
- bind(RestMultiSearchAction.class).asEagerSingleton();
- bind(RestRenderSearchTemplateAction.class).asEagerSingleton();
-
- bind(RestValidateQueryAction.class).asEagerSingleton();
-
- bind(RestExplainAction.class).asEagerSingleton();
-
- bind(RestRecoveryAction.class).asEagerSingleton();
-
- // Templates API
- bind(RestGetSearchTemplateAction.class).asEagerSingleton();
- bind(RestPutSearchTemplateAction.class).asEagerSingleton();
- bind(RestDeleteSearchTemplateAction.class).asEagerSingleton();
-
- // Scripts API
- bind(RestGetIndexedScriptAction.class).asEagerSingleton();
- bind(RestPutIndexedScriptAction.class).asEagerSingleton();
- bind(RestDeleteIndexedScriptAction.class).asEagerSingleton();
-
-
- bind(RestFieldStatsAction.class).asEagerSingleton();
-
- // cat API
- Multibinder<AbstractCatAction> catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class);
- catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestShardsAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestMasterAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestNodesAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestIndicesAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestSegmentsAction.class).asEagerSingleton();
- // Fully qualified to prevent interference with rest.action.count.RestCountAction
- catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestCountAction.class).asEagerSingleton();
- // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction
- catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestRecoveryAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestHealthAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestAliasAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestThreadPoolAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestPluginsAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestFielddataAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestNodeAttrsAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestRepositoriesAction.class).asEagerSingleton();
- catActionMultibinder.addBinding().to(RestSnapshotAction.class).asEagerSingleton();
- // no abstract cat action
- bind(RestCatAction.class).asEagerSingleton();
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java
index a1cfdb48dd..b7b5064c09 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java
@@ -23,19 +23,27 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestBuilderListener;
+import java.io.IOException;
+
/**
*/
public class RestClusterGetSettingsAction extends BaseRestHandler {
+ private final ClusterSettings clusterSettings;
+
@Inject
- public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client) {
+ public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, ClusterSettings clusterSettings) {
super(settings, controller, client);
+ this.clusterSettings = clusterSettings;
controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this);
}
@@ -44,24 +52,34 @@ public class RestClusterGetSettingsAction extends BaseRestHandler {
ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest()
.routingTable(false)
.nodes(false);
+ final boolean renderDefaults = request.paramAsBoolean("defaults", false);
clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) {
@Override
public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception {
- builder.startObject();
+ return new BytesRestResponse(RestStatus.OK, renderResponse(response.getState(), renderDefaults, builder, request));
+ }
+ });
+ }
- builder.startObject("persistent");
- response.getState().metaData().persistentSettings().toXContent(builder, request);
- builder.endObject();
+ private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder.startObject();
- builder.startObject("transient");
- response.getState().metaData().transientSettings().toXContent(builder, request);
- builder.endObject();
+ builder.startObject("persistent");
+ state.metaData().persistentSettings().toXContent(builder, params);
+ builder.endObject();
- builder.endObject();
+ builder.startObject("transient");
+ state.metaData().transientSettings().toXContent(builder, params);
+ builder.endObject();
- return new BytesRestResponse(RestStatus.OK, builder);
- }
- });
+ if (renderDefaults) {
+ builder.startObject("defaults");
+ clusterSettings.diff(state.metaData().settings(), this.settings).toXContent(builder, params);
+ builder.endObject();
+ }
+
+ builder.endObject();
+ return builder;
}
}
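
For callers, the visible effect is a new query parameter: GET /_cluster/settings?defaults=true adds a "defaults" object, computed by diffing the registered settings against those explicitly set, alongside the existing "persistent" and "transient" sections.
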
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java
index 9a3f844abb..0b8ffcf94d 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java
@@ -19,14 +19,14 @@
package org.elasticsearch.rest.action.admin.indices.flush;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.indices.flush.IndicesSyncedFlushResult;
-import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestBuilderListener;
@@ -38,12 +38,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
*/
public class RestSyncedFlushAction extends BaseRestHandler {
- private final SyncedFlushService syncedFlushService;
-
@Inject
- public RestSyncedFlushAction(Settings settings, RestController controller, Client client, SyncedFlushService syncedFlushService) {
+ public RestSyncedFlushAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client);
- this.syncedFlushService = syncedFlushService;
controller.registerHandler(POST, "/_flush/synced", this);
controller.registerHandler(POST, "/{index}/_flush/synced", this);
@@ -53,12 +50,12 @@ public class RestSyncedFlushAction extends BaseRestHandler {
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
- String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen());
-
- syncedFlushService.attemptSyncedFlush(indices, indicesOptions, new RestBuilderListener<IndicesSyncedFlushResult>(channel) {
+ SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
+ syncedFlushRequest.indicesOptions(indicesOptions);
+ client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener<SyncedFlushResponse>(channel) {
@Override
- public RestResponse buildResponse(IndicesSyncedFlushResult results, XContentBuilder builder) throws Exception {
+ public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception {
builder.startObject();
results.toXContent(builder, request);
builder.endObject();
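
With synced flush promoted to a first-class action, callers go through the client rather than SyncedFlushService. A hedged usage sketch; the helper class is hypothetical, and the varargs constructor is assumed from the String[] usage in the handler above:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;

class SyncedFlushHelper {
    static void flushSynced(Client client, String... indices) {
        SyncedFlushRequest request = new SyncedFlushRequest(indices);
        request.indicesOptions(IndicesOptions.lenientExpandOpen());
        client.admin().indices().syncedFlush(request, new ActionListener<SyncedFlushResponse>() {
            @Override
            public void onResponse(SyncedFlushResponse response) {
                // inspect per-shard results here
            }

            @Override
            public void onFailure(Throwable t) {
                // the flush attempt itself failed
            }
        });
    }
}
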
diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java
index 005b30e620..bd7e62abf4 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java
@@ -88,6 +88,6 @@ public class RestUpdateSettingsAction extends BaseRestHandler {
}
updateSettingsRequest.settings(updateSettings);
- client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<UpdateSettingsResponse>(channel));
+ client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel));
}
}
diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
index 073a4eb5fa..19bc447888 100644
--- a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
+++ b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java
@@ -62,7 +62,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri
}
@Override
- public Object compile(String script) {
+ public Object compile(String script, Map<String, String> params) {
NativeScriptFactory scriptFactory = scripts.get(script);
if (scriptFactory != null) {
return scriptFactory;
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java
index 993c95ad79..41befc9406 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java
@@ -36,7 +36,7 @@ public interface ScriptEngineService extends Closeable {
boolean sandboxed();
- Object compile(String script);
+ Object compile(String script, Map<String, String> params);
ExecutableScript executable(CompiledScript compiledScript, @Nullable Map<String, Object> vars);
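
The extra Map<String, String> threads engine-specific compile-time options through the shared interface; engines without options simply ignore it, and call sites that previously passed only the source now pass an empty map. A sketch of the caller side; CompileHelper is hypothetical:

import java.util.Collections;

import org.elasticsearch.script.ScriptEngineService;

class CompileHelper {
    // Code that used to call compile(source) now supplies a (usually
    // empty) options map alongside the script source.
    static Object compileWithoutOptions(ScriptEngineService service, String source) {
        return service.compile(source, Collections.<String, String>emptyMap());
    }
}
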
diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java
index 3b91f2d311..c9e9f9a873 100644
--- a/core/src/main/java/org/elasticsearch/script/ScriptService.java
+++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java
@@ -67,6 +67,7 @@ import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
@@ -96,9 +97,9 @@ public class ScriptService extends AbstractComponent implements Closeable {
private final Map<String, ScriptEngineService> scriptEnginesByLang;
private final Map<String, ScriptEngineService> scriptEnginesByExt;
- private final ConcurrentMap<String, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap();
+ private final ConcurrentMap<CacheKey, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap();
- private final Cache<String, CompiledScript> cache;
+ private final Cache<CacheKey, CompiledScript> cache;
private final Path scriptsDirectory;
private final ScriptModes scriptModes;
@@ -153,7 +154,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
this.defaultLang = settings.get(DEFAULT_SCRIPTING_LANGUAGE_SETTING, DEFAULT_LANG);
- CacheBuilder<String, CompiledScript> cacheBuilder = CacheBuilder.builder();
+ CacheBuilder<CacheKey, CompiledScript> cacheBuilder = CacheBuilder.builder();
if (cacheMaxSize >= 0) {
cacheBuilder.setMaximumWeight(cacheMaxSize);
}
@@ -224,7 +225,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
/**
* Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script.
*/
- public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) {
+ public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map<String, String> params) {
if (script == null) {
throw new IllegalArgumentException("The parameter script (Script) must not be null.");
}
@@ -252,14 +253,14 @@ public class ScriptService extends AbstractComponent implements Closeable {
" operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are not supported");
}
- return compileInternal(script, headersContext);
+ return compileInternal(script, headersContext, params);
}
/**
* Compiles a script straight-away, or returns the previously compiled and cached script,
* without checking if it can be executed based on settings.
*/
- public CompiledScript compileInternal(Script script, HasContextAndHeaders context) {
+ public CompiledScript compileInternal(Script script, HasContextAndHeaders context, Map<String, String> params) {
if (script == null) {
throw new IllegalArgumentException("The parameter script (Script) must not be null.");
}
@@ -277,7 +278,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang);
if (type == ScriptType.FILE) {
- String cacheKey = getCacheKey(scriptEngineService, name, null);
+ CacheKey cacheKey = new CacheKey(scriptEngineService, name, null, params);
//On disk scripts will be loaded into the staticCache by the listener
CompiledScript compiledScript = staticCache.get(cacheKey);
@@ -299,14 +300,14 @@ public class ScriptService extends AbstractComponent implements Closeable {
code = getScriptFromIndex(indexedScript.lang, indexedScript.id, context);
}
- String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? null : name, code);
+ CacheKey cacheKey = new CacheKey(scriptEngineService, type == ScriptType.INLINE ? null : name, code, params);
CompiledScript compiledScript = cache.get(cacheKey);
if (compiledScript == null) {
//Either an un-cached inline script or indexed script
//If the script type is inline the name will be the same as the code for identification in exceptions
try {
- compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code));
+ compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code, params));
} catch (Exception exception) {
throw new ScriptException("Failed to compile " + type + " script [" + name + "] using lang [" + lang + "]", exception);
}
@@ -364,7 +365,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
//we don't know yet what the script will be used for, but if all of the operations for this lang with
//indexed scripts are disabled, it makes no sense to even compile it.
if (isAnyScriptContextEnabled(scriptLang, scriptEngineService, ScriptType.INDEXED)) {
- Object compiled = scriptEngineService.compile(template.getScript());
+ Object compiled = scriptEngineService.compile(template.getScript(), Collections.emptyMap());
if (compiled == null) {
throw new IllegalArgumentException("Unable to parse [" + template.getScript() +
"] lang [" + scriptLang + "] (ScriptService.compile returned null)");
@@ -436,8 +437,8 @@ public class ScriptService extends AbstractComponent implements Closeable {
/**
* Compiles (or retrieves from cache) and executes the provided script
*/
- public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) {
- return executable(compile(script, scriptContext, headersContext), script.getParams());
+ public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map<String, String> params) {
+ return executable(compile(script, scriptContext, headersContext, params), script.getParams());
}
/**
@@ -450,8 +451,8 @@ public class ScriptService extends AbstractComponent implements Closeable {
/**
* Compiles (or retrieves from cache) and executes the provided search script
*/
- public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) {
- CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current());
+ public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext, Map<String, String> params) {
+ CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current(), params);
return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams());
}
@@ -491,9 +492,9 @@ public class ScriptService extends AbstractComponent implements Closeable {
* {@code ScriptEngineService}'s {@code scriptRemoved} method when the
* script has been removed from the cache
*/
- private class ScriptCacheRemovalListener implements RemovalListener<String, CompiledScript> {
+ private class ScriptCacheRemovalListener implements RemovalListener<CacheKey, CompiledScript> {
@Override
- public void onRemoval(RemovalNotification<String, CompiledScript> notification) {
+ public void onRemoval(RemovalNotification<CacheKey, CompiledScript> notification) {
scriptMetrics.onCacheEviction();
for (ScriptEngineService service : scriptEngines) {
try {
@@ -539,8 +540,8 @@ public class ScriptService extends AbstractComponent implements Closeable {
logger.info("compiling script file [{}]", file.toAbsolutePath());
try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) {
String script = Streams.copyToString(reader);
- String cacheKey = getCacheKey(engineService, scriptNameExt.v1(), null);
- staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script)));
+ CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap());
+ staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script, Collections.emptyMap())));
scriptMetrics.onCompilation();
}
} else {
@@ -565,7 +566,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2());
assert engineService != null;
logger.info("removing script file [{}]", file.toAbsolutePath());
- staticCache.remove(getCacheKey(engineService, scriptNameExt.v1(), null));
+ staticCache.remove(new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()));
}
}
@@ -625,11 +626,44 @@ public class ScriptService extends AbstractComponent implements Closeable {
}
}
- private static String getCacheKey(ScriptEngineService scriptEngineService, String name, String code) {
- String lang = scriptEngineService.types()[0];
- return lang + ":" + (name != null ? ":" + name : "") + (code != null ? ":" + code : "");
+ private static final class CacheKey {
+ final String lang;
+ final String name;
+ final String code;
+ final Map<String, String> params;
+
+ private CacheKey(final ScriptEngineService service, final String name, final String code, final Map<String, String> params) {
+ this.lang = service.types()[0];
+ this.name = name;
+ this.code = code;
+ this.params = params;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ CacheKey cacheKey = (CacheKey)o;
+
+ if (!lang.equals(cacheKey.lang)) return false;
+ if (name != null ? !name.equals(cacheKey.name) : cacheKey.name != null) return false;
+ if (code != null ? !code.equals(cacheKey.code) : cacheKey.code != null) return false;
+ return params.equals(cacheKey.params);
+
+ }
+
+ @Override
+ public int hashCode() {
+ int result = lang.hashCode();
+ result = 31 * result + (name != null ? name.hashCode() : 0);
+ result = 31 * result + (code != null ? code.hashCode() : 0);
+ result = 31 * result + params.hashCode();
+ return result;
+ }
}
+
private static class IndexedScript {
private final String lang;
private final String id;
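
Because the params map now participates in equals() and hashCode(), the same script source compiled under different compiler options lands in separate cache slots, which the old "lang:name:code" string key could not express. Conceptually (this only runs inside ScriptService, where CacheKey is visible; the option name is hypothetical):

    CacheKey a = new CacheKey(engineService, null, "doc['f'].value * 2", Collections.emptyMap());
    CacheKey b = new CacheKey(engineService, null, "doc['f'].value * 2",
            Collections.singletonMap("content_type", "application/json"));
    assert a.equals(b) == false; // params differ, so both compilations stay cached
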
diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java
index 9501099997..99f9b0ea0d 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchService.java
@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
@@ -38,6 +39,8 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
@@ -70,7 +73,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.IndicesWarmer.TerminationHandle;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
@@ -83,12 +85,16 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField;
+import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.*;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.*;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
+import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@@ -109,9 +115,10 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
public static final String NORMS_LOADING_KEY = "index.norms.loading";
public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive";
public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval";
- public static final String DEFAULT_SEARCH_TIMEOUT = "search.default_search_timeout";
public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);
+ public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER);
+
private final ThreadPool threadPool;
@@ -150,7 +157,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
private final ParseFieldMatcher parseFieldMatcher;
@Inject
- public SearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool,
+ public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool,
ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase,
IndicesRequestCache indicesQueryCache) {
super(settings);
@@ -184,19 +191,12 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer));
this.indicesWarmer.addListener(new SearchWarmer());
- defaultSearchTimeout = settings.getAsTime(DEFAULT_SEARCH_TIMEOUT, NO_TIMEOUT);
- nodeSettingsService.addListener(new SearchSettingsListener());
+ defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
}
- class SearchSettingsListener implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- final TimeValue maybeNewDefaultSearchTimeout = settings.getAsTime(SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout);
- if (!maybeNewDefaultSearchTimeout.equals(SearchService.this.defaultSearchTimeout)) {
- logger.info("updating [{}] from [{}] to [{}]", SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout, maybeNewDefaultSearchTimeout);
- SearchService.this.defaultSearchTimeout = maybeNewDefaultSearchTimeout;
- }
- }
+ private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) {
+ this.defaultSearchTimeout = defaultSearchTimeout;
}
@Override
@@ -549,7 +549,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
- SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
+ DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
SearchContext.setCurrent(context);
try {
@@ -558,7 +558,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
context.scrollContext().scroll = request.scroll();
}
if (request.template() != null) {
- ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context);
+ ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context, Collections.emptyMap());
BytesReference run = (BytesReference) executable.run();
try (XContentParser parser = XContentFactory.xContent(run).createParser(run)) {
QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry());
@@ -656,7 +656,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
}
- private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException {
+ private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException {
// nothing to parse...
if (source == null) {
return;
@@ -712,6 +712,9 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
if (source.minScore() != null) {
context.minimumScore(source.minScore());
}
+ if (source.profile()) {
+ context.setProfilers(new Profilers(context.searcher()));
+ }
context.timeoutInMillis(source.timeoutInMillis());
context.terminateAfter(source.terminateAfter());
if (source.aggregations() != null) {
@@ -807,19 +810,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
fieldDataFieldsContext.setHitExecutionNeeded(true);
}
if (source.highlighter() != null) {
- XContentParser highlighterParser = null;
+ HighlightBuilder highlightBuilder = source.highlighter();
try {
- highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter());
- this.elementParsers.get("highlight").parse(highlighterParser, context);
- } catch (Exception e) {
- String sSource = "_na_";
- try {
- sSource = source.toString();
- } catch (Throwable e1) {
- // ignore
- }
- XContentLocation location = highlighterParser != null ? highlighterParser.getTokenLocation() : null;
- throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e);
+ context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext()));
+ } catch (IOException e) {
+ throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
}
}
if (source.innerHits() != null) {
@@ -841,7 +836,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
}
if (source.scriptFields() != null) {
for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) {
- SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH);
+ SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, Collections.emptyMap());
context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure()));
}
}
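
The listener-based plumbing removed above gives way to the new Setting abstraction: the default is declared once, the initial value is read from node settings, and dynamic updates arrive through a registered consumer. A minimal sketch of the same pattern for a hypothetical component (setting key and default are illustrative):

    public static final Setting<TimeValue> MY_TIMEOUT_SETTING =
            Setting.timeSetting("my.component.timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

    // in the component's constructor, with ClusterSettings injected:
    this.timeout = MY_TIMEOUT_SETTING.get(settings);
    clusterSettings.addSettingsUpdateConsumer(MY_TIMEOUT_SETTING, updated -> this.timeout = updated);
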
diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java
index 1a12751d39..c648436c3a 100644
--- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java
+++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java
@@ -23,7 +23,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringAndBytesText;
 import org.elasticsearch.common.text.Text;
import java.io.IOException;
@@ -42,8 +42,8 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg
}
public SearchShardTarget(String nodeId, String index, int shardId) {
- this.nodeId = nodeId == null ? null : new StringAndBytesText(nodeId);
- this.index = new StringAndBytesText(index);
+ this.nodeId = nodeId == null ? null : new Text(nodeId);
+ this.index = new Text(index);
this.shardId = shardId;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
index 742f678f6f..0681996e3e 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.Queries;
@@ -30,10 +31,13 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.CollectorResult;
+import org.elasticsearch.search.profile.InternalProfileCollector;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -81,8 +85,13 @@ public class AggregationPhase implements SearchPhase {
}
context.aggregations().aggregators(aggregators);
if (!collectors.isEmpty()) {
- final BucketCollector collector = BucketCollector.wrap(collectors);
- collector.preCollection();
+ Collector collector = BucketCollector.wrap(collectors);
+ ((BucketCollector)collector).preCollection();
+ if (context.getProfilers() != null) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION,
+ // TODO: report on child aggs as well
+ Collections.emptyList());
+ }
context.queryCollectors().put(AggregationPhase.class, collector);
}
} catch (IOException e) {
@@ -116,6 +125,7 @@ public class AggregationPhase implements SearchPhase {
BucketCollector globalsCollector = BucketCollector.wrap(globals);
Query query = Queries.newMatchAllQuery();
Query searchFilter = context.searchFilter(context.types());
+
if (searchFilter != null) {
BooleanQuery filtered = new BooleanQuery.Builder()
.add(query, Occur.MUST)
@@ -124,8 +134,20 @@ public class AggregationPhase implements SearchPhase {
query = filtered;
}
try {
+ final Collector collector;
+ if (context.getProfilers() == null) {
+ collector = globalsCollector;
+ } else {
+ InternalProfileCollector profileCollector = new InternalProfileCollector(
+ globalsCollector, CollectorResult.REASON_AGGREGATION_GLOBAL,
+ // TODO: report on sub collectors
+ Collections.emptyList());
+ collector = profileCollector;
+ // start a new profile with this collector
+ context.getProfilers().addProfiler().setCollector(profileCollector);
+ }
globalsCollector.preCollection();
- context.searcher().search(query, globalsCollector);
+ context.searcher().search(query, collector);
} catch (Exception e) {
throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
} finally {
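
Both hunks above follow the same decorator pattern: the aggregation collector is wrapped in a profiling collector only when profilers are attached to the context, so the common non-profiled path keeps the plain BucketCollector and pays no overhead. Condensed, the shape is:

    Collector collector = bucketCollector;   // the real work
    if (context.getProfilers() != null) {    // profiling requested?
        collector = new InternalProfileCollector(collector,
                CollectorResult.REASON_AGGREGATION, Collections.emptyList());
    }
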
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java
index ee38e2b361..c1c1bff1ad 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java
@@ -25,6 +25,7 @@ import org.apache.lucene.search.Collector;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.stream.StreamSupport;
@@ -99,6 +100,11 @@ public abstract class BucketCollector implements Collector {
}
return false;
}
+
+ @Override
+ public String toString() {
+ return Arrays.toString(collectors);
+ }
};
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
index 6d9a1edc71..0678338fcf 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.bucket.children;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.*;
@@ -64,9 +65,6 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
private final LongObjectPagedHashMap<long[]> parentOrdToOtherBuckets;
private boolean multipleBucketsPerParentOrd = false;
- // This needs to be a Set to avoid duplicate reader context entries via (#setNextReader(...), it can get invoked multiple times with the same reader context)
- private Set<LeafReaderContext> replay = new LinkedHashSet<>();
-
public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext,
Aggregator parent, String parentType, Query childFilter, Query parentFilter,
ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource,
@@ -99,17 +97,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
if (valuesSource == null) {
return LeafBucketCollector.NO_OP_COLLECTOR;
}
- if (replay == null) {
- throw new IllegalStateException();
- }
final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
assert globalOrdinals != null;
Scorer parentScorer = parentFilter.scorer(ctx);
final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer);
- if (childFilter.scorer(ctx) != null) {
- replay.add(ctx);
- }
return new LeafBucketCollector() {
@Override
@@ -138,10 +130,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
@Override
protected void doPostCollection() throws IOException {
- final Set<LeafReaderContext> replay = this.replay;
- this.replay = null;
-
- for (LeafReaderContext ctx : replay) {
+ IndexReader indexReader = context().searchContext().searcher().getIndexReader();
+ for (LeafReaderContext ctx : indexReader.leaves()) {
DocIdSetIterator childDocsIter = childFilter.scorer(ctx);
if (childDocsIter == null) {
continue;
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
index da3bc286ff..faca359d76 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -23,7 +23,6 @@ import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.rounding.Rounding;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
@@ -151,7 +150,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (formatter != ValueFormatter.RAW) {
- Text keyTxt = new StringText(formatter.format(key));
+ Text keyTxt = new Text(formatter.format(key));
if (keyed) {
builder.startObject(keyTxt.string());
} else {
@@ -392,12 +391,14 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
return reducedBuckets;
}
- private void addEmptyBuckets(List<B> list) {
+ private void addEmptyBuckets(List<B> list, ReduceContext reduceContext) {
B lastBucket = null;
ExtendedBounds bounds = emptyBucketInfo.bounds;
ListIterator<B> iter = list.listIterator();
// first adding all the empty buckets *before* the actual data (based on the extended_bounds.min the user requested)
+ InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations),
+ reduceContext);
if (bounds != null) {
B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null;
if (firstBucket == null) {
@@ -405,7 +406,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
long key = bounds.min;
long max = bounds.max;
while (key <= max) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs,
+ keyed, formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
}
@@ -414,7 +417,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
long key = bounds.min;
if (key < firstBucket.key) {
while (key < firstBucket.key) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs,
+ keyed, formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
}
@@ -429,7 +434,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
if (lastBucket != null) {
long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
while (key < nextBucket.key) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs, keyed,
+ formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
assert key == nextBucket.key;
@@ -442,7 +449,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
long max = bounds.max;
while (key <= max) {
- iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter));
+ iter.add(getFactory().createBucket(key, 0,
+ reducedEmptySubAggs, keyed,
+ formatter));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
}
}
@@ -454,7 +463,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
// adding empty buckets if needed
if (minDocCount == 0) {
- addEmptyBuckets(reducedBuckets);
+ addEmptyBuckets(reducedBuckets, reduceContext);
}
if (order == InternalOrder.KEY_ASC) {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
index 046ca717b9..b6d1d56d07 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
@@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -82,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic {
@Override
public void initialize(InternalAggregation.ReduceContext context) {
- searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context);
+ searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap());
searchScript.setNextVar("_subset_freq", subsetDfHolder);
searchScript.setNextVar("_subset_size", subsetSizeHolder);
searchScript.setNextVar("_superset_freq", supersetDfHolder);
@@ -170,7 +171,7 @@ public class ScriptHeuristic extends SignificanceHeuristic {
}
ExecutableScript searchScript;
try {
- searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context);
+ searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap());
} catch (Exception e) {
throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. the script [{}] could not be loaded", e, script, heuristicName);
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
index 26c2eee2f6..c270517cd9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
@@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms<LongTerms, LongTerms.Bucket> {
@Override
public String getKeyAsString() {
- return String.valueOf(term);
+ return formatter.format(term);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
index d39a0335ac..00c6b6b49b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
@@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -91,7 +92,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement
vars.putAll(firstAggregation.reduceScript.getParams());
}
CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript,
- ScriptContext.Standard.AGGS, reduceContext);
+ ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap());
ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars);
aggregation = script.run();
} else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
index 2c1caaa524..6603c6289b 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
@@ -39,6 +39,7 @@ import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -58,11 +59,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
this.params = params;
ScriptService scriptService = context.searchContext().scriptService();
if (initScript != null) {
- scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run();
+ scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()).run();
}
- this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS);
+ this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS, Collections.emptyMap());
if (combineScript != null) {
- this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext());
+ this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap());
} else {
this.combineScript = null;
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java
index 32b5d7390d..1efd4a7cd2 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.metrics.tophits;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
@@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder {
return sourceBuilder;
}
- public BytesReference highlighter() {
+ public HighlightBuilder highlighter() {
return sourceBuilder().highlighter();
}
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java
index 789f8c961a..e5ccbf6971 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java
@@ -90,7 +90,7 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator {
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends Bucket> buckets = originalAgg.getBuckets();
- CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext);
+ CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap());
List newBuckets = new ArrayList<>();
for (Bucket bucket : buckets) {
Map<String, Object> vars = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java
index 669a223b21..edc3b4e87c 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java
@@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -88,7 +89,7 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator {
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation;
List<? extends Bucket> buckets = originalAgg.getBuckets();
- CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext);
+ CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap());
List newBuckets = new ArrayList<>();
for (Bucket bucket : buckets) {
Map<String, Object> vars = new HashMap<>();
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
index 506c9d16d7..a9dcc77ee9 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java
@@ -43,6 +43,7 @@ import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTimeZone;
import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -227,7 +228,7 @@ public class ValuesSourceParser<VS extends ValuesSource> {
}
private SearchScript createScript() {
- return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS);
+ return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS, Collections.emptyMap());
}
private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) {
diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 7963b678fb..3ea2d604b8 100644
--- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -22,6 +22,7 @@ package org.elasticsearch.search.builder;
import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.Version;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
@@ -91,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
public static final ParseField RESCORE_FIELD = new ParseField("rescore");
public static final ParseField STATS_FIELD = new ParseField("stats");
public static final ParseField EXT_FIELD = new ParseField("ext");
+ public static final ParseField PROFILE_FIELD = new ParseField("profile");
private static final SearchSourceBuilder PROTOTYPE = new SearchSourceBuilder();
@@ -144,7 +146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
private List<BytesReference> aggregations;
- private BytesReference highlightBuilder;
+ private HighlightBuilder highlightBuilder;
private BytesReference suggestBuilder;
@@ -158,6 +160,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
private BytesReference ext = null;
+ private boolean profile = false;
+
/**
* Constructs a new search source builder.
*/
@@ -405,22 +410,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
* Adds highlight to perform as part of the search.
*/
public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) {
- try {
- XContentBuilder builder = XContentFactory.jsonBuilder();
- builder.startObject();
- highlightBuilder.innerXContent(builder);
- builder.endObject();
- this.highlightBuilder = builder.bytes();
- return this;
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
+ this.highlightBuilder = highlightBuilder;
+ return this;
}
/**
- * Gets the bytes representing the hightlighter builder for this request.
+ * Gets the highlighter builder for this request.
*/
- public BytesReference highlighter() {
+ public HighlightBuilder highlighter() {
return highlightBuilder;
}
@@ -484,6 +481,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
/**
+ * Should the query be profiled. Defaults to <tt>false</tt>
+ */
+ public SearchSourceBuilder profile(boolean profile) {
+ this.profile = profile;
+ return this;
+ }
+
+ /**
+ * Return whether to profile query execution. Defaults to <tt>false</tt>.
+ */
+ public boolean profile() {
+ return profile;
+ }
+
+ /**
* Gets the bytes representing the rescore builders for this request.
*/
public List<BytesReference> rescores() {
@@ -731,6 +744,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
builder.fieldNames = fieldNames;
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
builder.sort(parser.text());
+ } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
+ builder.profile = parser.booleanValue();
} else {
throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
parser.getTokenLocation());
@@ -813,8 +828,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.aggregations = aggregations;
} else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
- XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
- builder.highlightBuilder = xContentBuilder.bytes();
+ builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
} else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser);
builder.innerHitsBuilder = xContentBuilder.bytes();
@@ -940,6 +954,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
builder.field(EXPLAIN_FIELD.getPreferredName(), explain);
}
+ if (profile) {
+ builder.field("profile", true);
+ }
+
if (fetchSourceContext != null) {
builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext);
}
@@ -1012,10 +1030,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
if (highlightBuilder != null) {
- builder.field(HIGHLIGHT_FIELD.getPreferredName());
- XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder);
- parser.nextToken();
- builder.copyCurrentStructure(parser);
+ this.highlightBuilder.toXContent(builder, params);
}
if (innerHitsBuilder != null) {
@@ -1158,7 +1173,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
}
builder.from = in.readVInt();
if (in.readBoolean()) {
- builder.highlightBuilder = in.readBytesReference();
+ builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in);
}
boolean hasIndexBoost = in.readBoolean();
if (hasIndexBoost) {
@@ -1224,6 +1239,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
if (in.readBoolean()) {
builder.ext = in.readBytesReference();
}
+ if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
+ builder.profile = in.readBoolean();
+ } else {
+ builder.profile = false;
+ }
return builder;
}
@@ -1259,7 +1279,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
boolean hasHighlightBuilder = highlightBuilder != null;
out.writeBoolean(hasHighlightBuilder);
if (hasHighlightBuilder) {
- out.writeBytesReference(highlightBuilder);
+ highlightBuilder.writeTo(out);
}
boolean hasIndexBoost = indexBoost != null;
out.writeBoolean(hasIndexBoost);
@@ -1337,13 +1357,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
if (hasExt) {
out.writeBytesReference(ext);
}
+ if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+ out.writeBoolean(profile);
+ }
}
@Override
public int hashCode() {
return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from,
highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
- size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version);
+ size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile);
}
@Override
@@ -1376,6 +1399,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
&& Objects.equals(terminateAfter, other.terminateAfter)
&& Objects.equals(timeoutInMillis, other.timeoutInMillis)
&& Objects.equals(trackScores, other.trackScores)
- && Objects.equals(version, other.version);
+ && Objects.equals(version, other.version)
+ && Objects.equals(profile, other.profile);
}
}
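
With the new flag wired through parsing, serialization, equals and hashCode, enabling profiling from the Java API is a one-liner. A usage sketch (field and query text are illustrative):

    SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchQuery("title", "elasticsearch"))
            .profile(true); // per-shard profile results come back with the response
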
diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
index f76527163c..835e6e7142 100644
--- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
+++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java
@@ -43,7 +43,6 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -52,9 +51,11 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.InternalProfileShardResults;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.profile.ProfileShardResult;
import java.io.IOException;
import java.util.ArrayList;
@@ -410,6 +411,17 @@ public class SearchPhaseController extends AbstractComponent {
}
}
+ //Collect profile results
+ InternalProfileShardResults shardResults = null;
+ if (!queryResults.isEmpty() && firstResult.profileResults() != null) {
+ Map<String, List<ProfileShardResult>> profileResults = new HashMap<>(queryResults.size());
+ for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+ String key = entry.value.queryResult().shardTarget().toString();
+ profileResults.put(key, entry.value.queryResult().profileResults());
+ }
+ shardResults = new InternalProfileShardResults(profileResults);
+ }
+
if (aggregations != null) {
List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators();
if (pipelineAggregators != null) {
@@ -427,7 +439,7 @@ public class SearchPhaseController extends AbstractComponent {
InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
- return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly);
+ return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 227141e4dd..04890700be 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.common.text.StringAndBytesText;
 import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
@@ -198,7 +198,7 @@ public class FetchPhase implements SearchPhase {
DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
Text typeText;
if (documentMapper == null) {
- typeText = new StringAndBytesText(fieldsVisitor.uid().type());
+ typeText = new Text(fieldsVisitor.uid().type());
} else {
typeText = documentMapper.typeText();
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java
index 7941e17775..2e76a4c370 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java
@@ -20,7 +20,6 @@
package org.elasticsearch.search.fetch.innerhits;
import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
@@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent {
return this;
}
- public BytesReference highlighter() {
+ public HighlightBuilder highlighter() {
return sourceBuilder().highlighter();
}
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
index 6dbdcbd589..de1703b5c9 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java
@@ -30,6 +30,7 @@ import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.internal.SearchContext;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -97,9 +98,9 @@ public class ScriptFieldsParseElement implements SearchParseElement {
throw new SearchParseException(context, "must specify a script in script fields", parser.getTokenLocation());
}
- SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH);
+ SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException));
}
}
}
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java
index d30144f777..b4de465cc7 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java
@@ -22,13 +22,19 @@ package org.elasticsearch.search.highlight;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryParseContext;
+import org.elasticsearch.search.highlight.HighlightBuilder.Order;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -74,7 +80,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
protected QueryBuilder<?> highlightQuery;
- protected String order;
+ protected Order order;
protected Boolean highlightFilter;
@@ -213,18 +219,26 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
/**
* The order of fragments per field. By default, ordered by the order in the
 * highlighted text. Can be <tt>score</tt>, in which case it will be ordered
- * by score of the fragments.
+ * by score of the fragments, or <tt>none</tt>.
*/
- @SuppressWarnings("unchecked")
public HB order(String order) {
- this.order = order;
+ return order(Order.fromString(order));
+ }
+
+ /**
+ * By default, fragments of a field are ordered by the order in the highlighted text.
+ * If set to {@link Order#SCORE}, this changes order to score of the fragments.
+ */
+ @SuppressWarnings("unchecked")
+ public HB order(Order scoreOrdered) {
+ this.order = scoreOrdered;
return (HB) this;
}
/**
- * @return the value set by {@link #order(String)}
+ * @return the value set by {@link #order(Order)}
*/
- public String order() {
+ public Order order() {
return this.order;
}
@@ -391,7 +405,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
builder.field(HIGHLIGHT_QUERY_FIELD.getPreferredName(), highlightQuery);
}
if (order != null) {
- builder.field(ORDER_FIELD.getPreferredName(), order);
+ builder.field(ORDER_FIELD.getPreferredName(), order.toString());
}
if (highlightFilter != null) {
builder.field(HIGHLIGHT_FILTER_FIELD.getPreferredName(), highlightFilter);
@@ -419,6 +433,100 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
}
}
+ /**
+ * Creates a new {@link AbstractHighlighterBuilder} from the highlighter held by the {@link QueryParseContext}
+ * in {@link org.elasticsearch.common.xcontent.XContent} format
+ *
+ * @param parseContext context containing the parser positioned at the structure to be parsed.
+ * The state of the parser held by this context is changed as a side effect of this
+ * method call
+ * @return the new {@link AbstractHighlighterBuilder}
+ */
+ public HB fromXContent(QueryParseContext parseContext) throws IOException {
+ XContentParser parser = parseContext.parser();
+ XContentParser.Token token = parser.currentToken();
+ String currentFieldName = null;
+ HB highlightBuilder = createInstance(parser);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) {
+ List<String> preTagsList = new ArrayList<>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ preTagsList.add(parser.text());
+ }
+ highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) {
+ List<String> postTagsList = new ArrayList<>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ postTagsList.add(parser.text());
+ }
+ highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
+ } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
+ throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName);
+ }
+ } else if (token.isValue()) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
+ highlightBuilder.order(Order.fromString(parser.text()));
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
+ highlightBuilder.highlightFilter(parser.booleanValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
+ highlightBuilder.fragmentSize(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
+ highlightBuilder.numOfFragments(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
+ highlightBuilder.requireFieldMatch(parser.booleanValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
+ highlightBuilder.boundaryMaxScan(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) {
+ highlightBuilder.boundaryChars(parser.text().toCharArray());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
+ highlightBuilder.highlighterType(parser.text());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) {
+ highlightBuilder.fragmenter(parser.text());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) {
+ highlightBuilder.noMatchSize(parser.intValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) {
+ highlightBuilder.forceSource(parser.booleanValue());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) {
+ highlightBuilder.phraseLimit(parser.intValue());
+ } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
+ throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName);
+ }
+ } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) {
+ highlightBuilder.options(parser.map());
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) {
+ highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder());
+ } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) {
+ throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName);
+ }
+ } else if (currentFieldName != null) {
+ throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName);
+ }
+ }
+
+ if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) {
+ throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set");
+ }
+ return highlightBuilder;
+ }
+
+ /**
+ * @param parser the input parser. Implementing classes might advance the parser depending on the
+ * information they need to instantiate a new instance
+ * @return a new instance
+ */
+ protected abstract HB createInstance(XContentParser parser) throws IOException;
+
+ /**
+ * Implementing subclasses can handle parsing special options depending on the
+ * current token, field name and the parse context.
+ * @return <tt>true</tt> if an option was found and successfully parsed, otherwise <tt>false</tt>
+ */
+ protected abstract boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, XContentParser.Token endMarkerToken) throws IOException;
+
@Override
public final int hashCode() {
return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize,
@@ -480,7 +588,9 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (in.readBoolean()) {
highlightQuery(in.readQuery());
}
- order(in.readOptionalString());
+ if (in.readBoolean()) {
+ order(Order.PROTOTYPE.readFrom(in));
+ }
highlightFilter(in.readOptionalBoolean());
forceSource(in.readOptionalBoolean());
boundaryMaxScan(in.readOptionalVInt());
@@ -511,7 +621,11 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
if (hasQuery) {
out.writeQuery(highlightQuery);
}
- out.writeOptionalString(order);
+ boolean hasSetOrder = order != null;
+ out.writeBoolean(hasSetOrder);
+ if (hasSetOrder) {
+ order.writeTo(out);
+ }
out.writeOptionalBoolean(highlightFilter);
out.writeOptionalBoolean(forceSource);
out.writeOptionalVInt(boundaryMaxScan);
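For illustration only (not part of the patch): the two new abstract hooks turn parsing into a template method. The base class consumes the options common to all highlighter builders, and the hooks cover instance creation and subclass-specific options. A minimal hedged sketch of the contract, eliding the other abstract members a real subclass must also implement:

    // Hypothetical subclass, illustration only.
    public class NoOptionsHighlighterBuilder extends AbstractHighlighterBuilder<NoOptionsHighlighterBuilder> {
        @Override
        protected NoOptionsHighlighterBuilder createInstance(XContentParser parser) {
            return new NoOptionsHighlighterBuilder();   // no state needed from the parser position
        }

        @Override
        protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName,
                XContentParser.Token token) throws IOException {
            // No extra options: report "not handled" so the base class raises
            // its ParsingException for the unknown field.
            return false;
        }
    }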
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
index 9e86edef47..8ad24b5cb1 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java
@@ -82,7 +82,7 @@ public final class CustomQueryScorer extends QueryScorer {
} else if (query instanceof FiltersFunctionScoreQuery) {
query = ((FiltersFunctionScoreQuery) query).getSubQuery();
extract(query, query.getBoost(), terms);
- } else {
+ } else if (terms.isEmpty()) {
extractWeightedTerms(terms, query, query.getBoost());
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
index 65702dd24b..b57899b2e1 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
@@ -33,7 +33,7 @@ import org.apache.lucene.search.vectorhighlight.SimpleFieldFragList;
import org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder;
import org.apache.lucene.search.vectorhighlight.SingleFragListBuilder;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -159,7 +159,7 @@ public class FastVectorHighlighter implements Highlighter {
}
if (fragments != null && fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
@@ -170,7 +170,7 @@ public class FastVectorHighlighter implements Highlighter {
fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().names().indexName(),
fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
if (fragments != null && fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
}
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
index e45303ccb5..c0b1aeea3b 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java
@@ -30,11 +30,13 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions;
import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions.Builder;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -42,6 +44,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
+import java.util.Locale;
import java.util.Objects;
import java.util.Set;
@@ -230,117 +233,45 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
}
/**
- * Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext}
- * in {@link org.elasticsearch.common.xcontent.XContent} format
- *
- * @param parseContext
- * the input parse context. The state on the parser contained in
- * this context will be changed as a side effect of this method
- * call
- * @return the new {@link HighlightBuilder}
+ * Parses options that are only present on the top-level highlight builder (`tags_schema`, `encoder` and the nested `fields`).
*/
- public static HighlightBuilder fromXContent(QueryParseContext parseContext) throws IOException {
+ @Override
+ protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, Token currentToken) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token;
- String topLevelFieldName = null;
-
- HighlightBuilder highlightBuilder = new HighlightBuilder();
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- topLevelFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_ARRAY) {
- if (parseContext.parseFieldMatcher().match(topLevelFieldName, PRE_TAGS_FIELD)) {
- List<String> preTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- preTagsList.add(parser.text());
- }
- highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, POST_TAGS_FIELD)) {
- List<String> postTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- postTagsList.add(parser.text());
- }
- highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) {
- highlightBuilder.useExplicitFieldOrder(true);
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- if (token == XContentParser.Token.START_OBJECT) {
- String highlightFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- if (highlightFieldName != null) {
- throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field");
- }
- highlightFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_OBJECT) {
- highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext));
- }
- }
- } else {
- throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field");
- }
- }
- } else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", topLevelFieldName);
- }
- } else if (token.isValue()) {
- if (parseContext.parseFieldMatcher().match(topLevelFieldName, ORDER_FIELD)) {
- highlightBuilder.order(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TAGS_SCHEMA_FIELD)) {
- highlightBuilder.tagsSchema(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_FILTER_FIELD)) {
- highlightBuilder.highlightFilter(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENT_SIZE_FIELD)) {
- highlightBuilder.fragmentSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
- highlightBuilder.numOfFragments(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, ENCODER_FIELD)) {
- highlightBuilder.encoder(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
- highlightBuilder.requireFieldMatch(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
- highlightBuilder.boundaryMaxScan(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_CHARS_FIELD)) {
- highlightBuilder.boundaryChars(parser.text().toCharArray());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TYPE_FIELD)) {
- highlightBuilder.highlighterType(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENTER_FIELD)) {
- highlightBuilder.fragmenter(parser.text());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NO_MATCH_SIZE_FIELD)) {
- highlightBuilder.noMatchSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FORCE_SOURCE_FIELD)) {
- highlightBuilder.forceSource(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, PHRASE_LIMIT_FIELD)) {
- highlightBuilder.phraseLimit(parser.intValue());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", topLevelFieldName);
- }
- } else if (token == XContentParser.Token.START_OBJECT && topLevelFieldName != null) {
- if (parseContext.parseFieldMatcher().match(topLevelFieldName, OPTIONS_FIELD)) {
- highlightBuilder.options(parser.map());
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) {
- String highlightFieldName = null;
+ boolean foundCurrentFieldMatch = false;
+ if (currentToken.isValue()) {
+ if (parseContext.parseFieldMatcher().match(currentFieldName, TAGS_SCHEMA_FIELD)) {
+ tagsSchema(parser.text());
+ foundCurrentFieldMatch = true;
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, ENCODER_FIELD)) {
+ encoder(parser.text());
+ foundCurrentFieldMatch = true;
+ }
+ } else if (currentToken == Token.START_ARRAY && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
+ useExplicitFieldOrder(true);
+ while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+ if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
- highlightFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_OBJECT) {
- highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext));
+ field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext));
}
}
- } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_QUERY_FIELD)) {
- highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder());
+ foundCurrentFieldMatch = true;
} else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", topLevelFieldName);
+ throw new ParsingException(parser.getTokenLocation(),
+ "If highlighter fields is an array it must contain objects containing a single field");
}
- } else if (topLevelFieldName != null) {
- throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, topLevelFieldName);
}
+ } else if (currentToken == Token.START_OBJECT && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext));
+ }
+ }
+ foundCurrentFieldMatch = true;
}
-
- if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) {
- throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set");
- }
- return highlightBuilder;
+ return foundCurrentFieldMatch;
}
public SearchContextHighlight build(QueryShardContext context) throws IOException {
@@ -378,9 +309,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void transferOptions(AbstractHighlighterBuilder highlighterBuilder, SearchContextHighlight.FieldOptions.Builder targetOptionsBuilder, QueryShardContext context) throws IOException {
- targetOptionsBuilder.preTags(highlighterBuilder.preTags);
- targetOptionsBuilder.postTags(highlighterBuilder.postTags);
- targetOptionsBuilder.scoreOrdered("score".equals(highlighterBuilder.order));
+ if (highlighterBuilder.preTags != null) {
+ targetOptionsBuilder.preTags(highlighterBuilder.preTags);
+ }
+ if (highlighterBuilder.postTags != null) {
+ targetOptionsBuilder.postTags(highlighterBuilder.postTags);
+ }
+ if (highlighterBuilder.order != null) {
+ targetOptionsBuilder.scoreOrdered(highlighterBuilder.order == Order.SCORE);
+ }
if (highlighterBuilder.highlightFilter != null) {
targetOptionsBuilder.highlightFilter(highlighterBuilder.highlightFilter);
}
@@ -396,9 +333,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
if (highlighterBuilder.boundaryMaxScan != null) {
targetOptionsBuilder.boundaryMaxScan(highlighterBuilder.boundaryMaxScan);
}
- targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
- targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
- targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
+ if (highlighterBuilder.boundaryChars != null) {
+ targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
+ }
+ if (highlighterBuilder.highlighterType != null) {
+ targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
+ }
+ if (highlighterBuilder.fragmenter != null) {
+ targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
+ }
if (highlighterBuilder.noMatchSize != null) {
targetOptionsBuilder.noMatchSize(highlighterBuilder.noMatchSize);
}
@@ -408,7 +351,9 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
if (highlighterBuilder.phraseLimit != null) {
targetOptionsBuilder.phraseLimit(highlighterBuilder.phraseLimit);
}
- targetOptionsBuilder.options(highlighterBuilder.options);
+ if (highlighterBuilder.options != null) {
+ targetOptionsBuilder.options(highlighterBuilder.options);
+ }
if (highlighterBuilder.highlightQuery != null) {
targetOptionsBuilder.highlightQuery(highlighterBuilder.highlightQuery.toQuery(context));
}
@@ -468,6 +413,11 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
}
@Override
+ protected HighlightBuilder createInstance(XContentParser parser) {
+ return new HighlightBuilder();
+ }
+
+ @Override
protected int doHashCode() {
return Objects.hash(encoder, useExplicitFieldOrder, fields);
}
@@ -549,80 +499,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
builder.endObject();
}
- private static HighlightBuilder.Field fromXContent(String fieldname, QueryParseContext parseContext) throws IOException {
+ /**
+ * Parses options that are only present on the per-field highlight builder (`fragment_offset`, `matched_fields`).
+ */
+ @Override
+ protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, Token currentToken) throws IOException {
XContentParser parser = parseContext.parser();
- XContentParser.Token token;
-
- final HighlightBuilder.Field field = new HighlightBuilder.Field(fieldname);
- String currentFieldName = null;
- while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (token == XContentParser.Token.FIELD_NAME) {
- currentFieldName = parser.currentName();
- } else if (token == XContentParser.Token.START_ARRAY) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) {
- List<String> preTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- preTagsList.add(parser.text());
- }
- field.preTags(preTagsList.toArray(new String[preTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) {
- List<String> postTagsList = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- postTagsList.add(parser.text());
- }
- field.postTags(postTagsList.toArray(new String[postTagsList.size()]));
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)) {
- List<String> matchedFields = new ArrayList<>();
- while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
- matchedFields.add(parser.text());
- }
- field.matchedFields(matchedFields.toArray(new String[matchedFields.size()]));
- } else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName);
- }
- } else if (token.isValue()) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
- field.fragmentSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
- field.numOfFragments(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD)) {
- field.fragmentOffset(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
- field.highlightFilter(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
- field.order(parser.text());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
- field.requireFieldMatch(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
- field.boundaryMaxScan(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) {
- field.boundaryChars(parser.text().toCharArray());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
- field.highlighterType(parser.text());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) {
- field.fragmenter(parser.text());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) {
- field.noMatchSize(parser.intValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) {
- field.forceSource(parser.booleanValue());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) {
- field.phraseLimit(parser.intValue());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName);
- }
- } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
- if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) {
- field.highlightQuery(parseContext.parseInnerQueryBuilder());
- } else if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) {
- field.options(parser.map());
- } else {
- throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName);
- }
- } else if (currentFieldName != null) {
- throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName);
+ boolean foundCurrentFieldMatch = false;
+ if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD) && currentToken.isValue()) {
+ fragmentOffset(parser.intValue());
+ foundCurrentFieldMatch = true;
+ } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)
+ && currentToken == XContentParser.Token.START_ARRAY) {
+ List<String> matchedFields = new ArrayList<>();
+ while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ matchedFields.add(parser.text());
}
+ matchedFields(matchedFields.toArray(new String[matchedFields.size()]));
+ foundCurrentFieldMatch = true;
+ }
+ return foundCurrentFieldMatch;
+ }
+
+ @Override
+ protected Field createInstance(XContentParser parser) throws IOException {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ String fieldname = parser.currentName();
+ return new Field(fieldname);
+ } else {
+ throw new ParsingException(parser.getTokenLocation(), "unknown token type [{}], expected field name", parser.currentToken());
}
- return field;
}
@Override
@@ -654,4 +560,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
writeOptionsTo(out);
}
}
+
+ public enum Order implements Writeable<Order> {
+ NONE, SCORE;
+
+ static Order PROTOTYPE = NONE;
+
+ @Override
+ public Order readFrom(StreamInput in) throws IOException {
+ int ordinal = in.readVInt();
+ if (ordinal < 0 || ordinal >= values().length) {
+ throw new IOException("Unknown Order ordinal [" + ordinal + "]");
+ }
+ return values()[ordinal];
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(this.ordinal());
+ }
+
+ public static Order fromString(String order) {
+ if (order.toUpperCase(Locale.ROOT).equals(SCORE.name())) {
+ return Order.SCORE;
+ }
+ return NONE;
+ }
+
+ @Override
+ public String toString() {
+ return name().toLowerCase(Locale.ROOT);
+ }
+ }
}
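For illustration only (not part of the patch): the typed Order enum replaces the old free-form string, while fromString keeps parsing lenient. A short sketch against the API shown above:

    HighlightBuilder builder = new HighlightBuilder()
            .field("body")
            .order(HighlightBuilder.Order.SCORE);                     // typed setter
    assert builder.order() == HighlightBuilder.Order.SCORE;
    assert HighlightBuilder.Order.fromString("SCORE") == HighlightBuilder.Order.SCORE;   // case-insensitive
    assert HighlightBuilder.Order.fromString("anything-else") == HighlightBuilder.Order.NONE; // falls back to NONE
    assert "score".equals(HighlightBuilder.Order.SCORE.toString());  // lowercased on output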
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
index 9077278d51..30530b697f 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java
@@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import java.io.IOException;
@@ -90,7 +89,7 @@ public class HighlightField implements Streamable {
if (in.readBoolean()) {
int size = in.readVInt();
if (size == 0) {
- fragments = StringText.EMPTY_ARRAY;
+ fragments = Text.EMPTY_ARRAY;
} else {
fragments = new Text[size];
for (int i = 0; i < size; i++) {
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
index 041ed754d7..5f4cdddb06 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
@@ -33,9 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CollectionUtil;
-import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
@@ -158,7 +156,7 @@ public class PlainHighlighter implements Highlighter {
}
if (fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
@@ -172,7 +170,7 @@ public class PlainHighlighter implements Highlighter {
throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
}
if (end > 0) {
- return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) });
+ return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
}
}
return null;
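For illustration only (not part of the patch): with the StringText and StringAndBytesText variants folded into a single concrete Text class, both construction paths used by the highlighters above reduce to:

    Text single = new Text("some highlighted fragment");              // replaces new StringText(...)
    Text[] many = Text.convertFromStringArray(new String[] { "frag one", "frag two" });
    HighlightField field = new HighlightField("body", many);          // constructor as used in the hunks above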
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
index e11840e89e..2509f95da5 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
@@ -28,7 +28,7 @@ import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator;
import org.apache.lucene.search.postingshighlight.Snippet;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter {
}
if (fragments.length > 0) {
- return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+ return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
}
return null;
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
index 0a9b860edb..a7bacb64d9 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
@@ -26,6 +26,9 @@ import org.apache.lucene.search.*;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.search.profile.ProfileBreakdown;
+import org.elasticsearch.search.profile.ProfileWeight;
+import org.elasticsearch.search.profile.Profiler;
import java.io.IOException;
@@ -43,26 +46,44 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
private final Engine.Searcher engineSearcher;
- public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
+ // TODO revisit moving the profiler to inheritance or wrapping model in the future
+ private Profiler profiler;
+
+ public ContextIndexSearcher(Engine.Searcher searcher,
+ QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) {
super(searcher.reader());
in = searcher.searcher();
engineSearcher = searcher;
setSimilarity(searcher.searcher().getSimilarity(true));
- setQueryCache(searchContext.getQueryCache());
- setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy());
+ setQueryCache(queryCache);
+ setQueryCachingPolicy(queryCachingPolicy);
}
@Override
public void close() {
}
+ public void setProfiler(Profiler profiler) {
+ this.profiler = profiler;
+ }
+
public void setAggregatedDfs(AggregatedDfs aggregatedDfs) {
this.aggregatedDfs = aggregatedDfs;
}
@Override
public Query rewrite(Query original) throws IOException {
- return in.rewrite(original);
+ if (profiler != null) {
+ profiler.startRewriteTime();
+ }
+
+ try {
+ return in.rewrite(original);
+ } finally {
+ if (profiler != null) {
+ profiler.stopAndAddRewriteTime();
+ }
+ }
}
@Override
@@ -72,8 +93,34 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
if (aggregatedDfs != null && needsScores) {
// if scores are needed and we have dfs data then use it
return super.createNormalizedWeight(query, needsScores);
+ } else if (profiler != null) {
+ // we need to use the createWeight method to insert the wrappers
+ return super.createNormalizedWeight(query, needsScores);
+ } else {
+ return in.createNormalizedWeight(query, needsScores);
+ }
+ }
+
+ @Override
+ public Weight createWeight(Query query, boolean needsScores) throws IOException {
+ if (profiler != null) {
+ // createWeight() is called for each query in the tree, so we tell the queryProfiler
+ // each invocation so that it can build an internal representation of the query
+ // tree
+ ProfileBreakdown profile = profiler.getQueryBreakdown(query);
+ profile.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT);
+ final Weight weight;
+ try {
+ weight = super.createWeight(query, needsScores);
+ } finally {
+ profile.stopAndRecordTime();
+ profiler.pollLastQuery();
+ }
+ return new ProfileWeight(query, weight, profile);
+ } else {
+ // needs to be 'super', not 'in' in order to use aggregated DFS
+ return super.createWeight(query, needsScores);
}
- return in.createNormalizedWeight(query, needsScores);
}
@Override
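For illustration only (not part of the patch): how a profiled search would wire the refactored searcher, assuming engineSearcher, queryCache, cachingPolicy and profiler are obtained from the surrounding search context as in the hunks above. With a null profiler every path falls through to the unwrapped delegate:

    ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, queryCache, cachingPolicy);
    searcher.setProfiler(profiler);                                   // optional; null disables timing
    Query rewritten = searcher.rewrite(query);                        // rewrite time recorded when profiling
    Weight weight = searcher.createNormalizedWeight(rewritten, true); // weights wrapped in ProfileWeight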
diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
index 1174fcdd8a..2d3f659062 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
@@ -58,6 +58,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -129,10 +131,10 @@ public class DefaultSearchContext extends SearchContext {
private List<RescoreSearchContext> rescore;
private SearchLookup searchLookup;
private volatile long keepAlive;
- private ScoreDoc lastEmittedDoc;
private final long originNanoTime = System.nanoTime();
private volatile long lastAccessTime = -1;
private InnerHitsContext innerHitsContext;
+ private Profilers profilers;
private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
private final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
@@ -158,7 +160,7 @@ public class DefaultSearchContext extends SearchContext {
this.fetchResult = new FetchSearchResult(id, shardTarget);
this.indexShard = indexShard;
this.indexService = indexService;
- this.searcher = new ContextIndexSearcher(this, engineSearcher);
+ this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
this.timeEstimateCounter = timeEstimateCounter;
this.timeoutInMillis = timeout.millis();
}
@@ -724,5 +726,11 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
- public QueryCache getQueryCache() { return indexService.cache().query();}
+ public Profilers getProfilers() {
+ return profilers;
+ }
+
+ public void setProfilers(Profilers profilers) {
+ this.profilers = profilers;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
index 7225c7b32b..1f04d01340 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
@@ -29,7 +29,6 @@ import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
@@ -49,6 +48,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -517,8 +517,11 @@ public abstract class FilteredSearchContext extends SearchContext {
}
@Override
- public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();}
+ public Profilers getProfilers() {
+ return in.getProfilers();
+ }
@Override
- public QueryCache getQueryCache() { return in.getQueryCache();}
+ public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();}
+
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
index 96fd103fa6..fcac5b1cc8 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java
@@ -30,7 +30,7 @@ import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringAndBytesText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -104,14 +104,14 @@ public class InternalSearchHit implements SearchHit {
public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) {
this.docId = docId;
- this.id = new StringAndBytesText(id);
+ this.id = new Text(id);
this.type = type;
this.fields = fields;
}
public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map<String, SearchHitField> fields) {
this.docId = nestedTopDocId;
- this.id = new StringAndBytesText(id);
+ this.id = new Text(id);
this.type = type;
this.nestedIdentity = nestedIdentity;
this.fields = fields;
@@ -339,7 +339,7 @@ public class InternalSearchHit implements SearchHit {
if (sortValues != null) {
for (int i = 0; i < sortValues.length; i++) {
if (sortValues[i] instanceof BytesRef) {
- sortValuesCopy[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i]));
+ sortValuesCopy[i] = new Text(new BytesArray((BytesRef) sortValues[i]));
}
}
}
@@ -783,7 +783,7 @@ public class InternalSearchHit implements SearchHit {
private InternalNestedIdentity child;
public InternalNestedIdentity(String field, int offset, InternalNestedIdentity child) {
- this.field = new StringAndBytesText(field);
+ this.field = new Text(field);
this.offset = offset;
this.child = child;
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
index 7b73772f9d..b8255e0bb5 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java
@@ -28,9 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.profile.InternalProfileShardResults;
+import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits;
@@ -40,7 +45,7 @@ import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHit
public class InternalSearchResponse implements Streamable, ToXContent {
public static InternalSearchResponse empty() {
- return new InternalSearchResponse(InternalSearchHits.empty(), null, null, false, null);
+ return new InternalSearchResponse(InternalSearchHits.empty(), null, null, null, false, null);
}
private InternalSearchHits hits;
@@ -49,6 +54,8 @@ public class InternalSearchResponse implements Streamable, ToXContent {
private Suggest suggest;
+ private InternalProfileShardResults profileResults;
+
private boolean timedOut;
private Boolean terminatedEarly = null;
@@ -56,10 +63,12 @@ public class InternalSearchResponse implements Streamable, ToXContent {
private InternalSearchResponse() {
}
- public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly) {
+ public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest,
+ InternalProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) {
this.hits = hits;
this.aggregations = aggregations;
this.suggest = suggest;
+ this.profileResults = profileResults;
this.timedOut = timedOut;
this.terminatedEarly = terminatedEarly;
}
@@ -84,6 +93,19 @@ public class InternalSearchResponse implements Streamable, ToXContent {
return suggest;
}
+ /**
+ * Returns the profile results for this search response (including all shards).
+ * An empty map is returned if profiling was not enabled
+ *
+ * @return Profile results
+ */
+ public Map<String, List<ProfileShardResult>> profile() {
+ if (profileResults == null) {
+ return Collections.emptyMap();
+ }
+ return profileResults.getShardResults();
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
hits.toXContent(builder, params);
@@ -93,6 +115,9 @@ public class InternalSearchResponse implements Streamable, ToXContent {
if (suggest != null) {
suggest.toXContent(builder, params);
}
+ if (profileResults != null) {
+ profileResults.toXContent(builder, params);
+ }
return builder;
}
@@ -114,6 +139,12 @@ public class InternalSearchResponse implements Streamable, ToXContent {
timedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
+
+ if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
+ profileResults = new InternalProfileShardResults(in);
+ } else {
+ profileResults = null;
+ }
}
@Override
@@ -134,5 +165,14 @@ public class InternalSearchResponse implements Streamable, ToXContent {
out.writeBoolean(timedOut);
out.writeOptionalBoolean(terminatedEarly);
+
+ if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+ if (profileResults == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ profileResults.writeTo(out);
+ }
+ }
}
}
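For illustration only (not part of the patch): consuming the new profile results from a response. The map is keyed per shard and is empty when profiling was disabled, so callers need no null check:

    Map<String, List<ProfileShardResult>> profile = internalResponse.profile();
    for (Map.Entry<String, List<ProfileShardResult>> shard : profile.entrySet()) {
        System.out.println(shard.getKey() + " -> " + shard.getValue().size() + " profiled result(s)");
    }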
diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
index 0f61b2bc6a..4e4e9dd5dd 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
@@ -56,6 +55,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
import org.elasticsearch.search.fetch.source.FetchSourceContext;
import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -304,6 +304,11 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple
public abstract FetchSearchResult fetchResult();
/**
+ * Return a handle over the profilers for the current search request, or {@code null} if profiling is not enabled.
+ */
+ public abstract Profilers getProfilers();
+
+ /**
* Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object
* is function of the provided {@link Lifetime}.
*/
@@ -367,5 +372,4 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple
CONTEXT
}
- public abstract QueryCache getQueryCache();
}
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
index 47791aeddf..9d15dfd579 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java
@@ -71,6 +71,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
private Boolean requestCache;
private long nowInMillis;
+ private boolean profile;
+
ShardSearchLocalRequest() {
}
@@ -165,6 +167,16 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
return scroll;
}
+ @Override
+ public void setProfile(boolean profile) {
+ this.profile = profile;
+ }
+
+ @Override
+ public boolean isProfile() {
+ return profile;
+ }
+
@SuppressWarnings("unchecked")
protected void innerReadFrom(StreamInput in) throws IOException {
index = in.readString();
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
index fb631b0827..b1730b6a14 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
@@ -60,6 +60,17 @@ public interface ShardSearchRequest extends HasContextAndHeaders {
Scroll scroll();
/**
+ * Sets whether this shard search should be profiled
+ * @param profile True if the shard should be profiled
+ */
+ void setProfile(boolean profile);
+
+ /**
+ * Returns true if this shard search is being profiled
+ */
+ boolean isProfile();
+
+ /**
* Returns the cache key for this shard search request, based on its content
*/
BytesReference cacheKey() throws IOException;
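For illustration only (not part of the patch): the flag travels with each shard-level request; the concrete classes below either store it (ShardSearchLocalRequest) or delegate it (ShardSearchTransportRequest). A hedged sketch of a caller, with profileRequested standing in for whatever the coordinating node decided:

    void prepareShardSearch(ShardSearchRequest shardRequest, boolean profileRequested) {
        shardRequest.setProfile(profileRequested);
        if (shardRequest.isProfile()) {
            // the search context built from this request will set up its Profilers
        }
    }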
diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java
index 279d9d6bd2..0f9c0ced41 100644
--- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java
+++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java
@@ -150,4 +150,14 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public BytesReference cacheKey() throws IOException {
return shardSearchLocalRequest.cacheKey();
}
+
+ @Override
+ public void setProfile(boolean profile) {
+ shardSearchLocalRequest.setProfile(profile);
+ }
+
+ @Override
+ public boolean isProfile() {
+ return shardSearchLocalRequest.isProfile();
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java
new file mode 100644
index 0000000000..4949c6388d
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+/**
+ * Public interface and serialization container for profiled timings of the
+ * Collectors used in the search. Child CollectorResults may be
+ * embedded inside a parent CollectorResult.
+ */
+public class CollectorResult implements ToXContent, Writeable {
+
+ public static final String REASON_SEARCH_COUNT = "search_count";
+ public static final String REASON_SEARCH_TOP_HITS = "search_top_hits";
+ public static final String REASON_SEARCH_TERMINATE_AFTER_COUNT = "search_terminate_after_count";
+ public static final String REASON_SEARCH_POST_FILTER = "search_post_filter";
+ public static final String REASON_SEARCH_MIN_SCORE = "search_min_score";
+ public static final String REASON_SEARCH_MULTI = "search_multi";
+ public static final String REASON_SEARCH_TIMEOUT = "search_timeout";
+ public static final String REASON_AGGREGATION = "aggregation";
+ public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global";
+
+ private static final ParseField NAME = new ParseField("name");
+ private static final ParseField REASON = new ParseField("reason");
+ private static final ParseField TIME = new ParseField("time");
+ private static final ParseField CHILDREN = new ParseField("children");
+
+ /**
+ * A more friendly representation of the Collector's class name
+ */
+ private final String collectorName;
+
+ /**
+ * A "hint" to help provide some context about this Collector
+ */
+ private final String reason;
+
+ /**
+ * The total elapsed time for this Collector
+ */
+ private final Long time;
+
+ /**
+ * A list of children collectors "embedded" inside this collector
+ */
+ private List<CollectorResult> children;
+
+ public CollectorResult(String collectorName, String reason, Long time, List<CollectorResult> children) {
+ this.collectorName = collectorName;
+ this.reason = reason;
+ this.time = time;
+ this.children = children;
+ }
+
+ public CollectorResult(StreamInput in) throws IOException {
+ this.collectorName = in.readString();
+ this.reason = in.readString();
+ this.time = in.readLong();
+ int size = in.readVInt();
+ this.children = new ArrayList<>(size);
+ for (int i = 0; i < size; i++) {
+ CollectorResult child = new CollectorResult(in);
+ this.children.add(child);
+ }
+ }
+
+ /**
+ * @return the profiled time for this collector (inclusive of children)
+ */
+ public long getTime() {
+ return this.time;
+ }
+
+ /**
+ * @return a human readable "hint" about what this collector was used for
+ */
+ public String getReason() {
+ return this.reason;
+ }
+
+ /**
+ * @return the lucene class name of the collector
+ */
+ public String getName() {
+ return this.collectorName;
+ }
+
+ /**
+ * @return a list of children collectors
+ */
+ public List<CollectorResult> getProfiledChildren() {
+ return children;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
+ builder = builder.startObject()
+ .field(NAME.getPreferredName(), toString())
+ .field(REASON.getPreferredName(), reason)
+ .field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0)));
+
+ if (!children.isEmpty()) {
+ builder = builder.startArray(CHILDREN.getPreferredName());
+ for (CollectorResult child : children) {
+ builder = child.toXContent(builder, params);
+ }
+ builder = builder.endArray();
+ }
+ builder = builder.endObject();
+ return builder;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(collectorName);
+ out.writeString(reason);
+ out.writeLong(time);
+ out.writeVInt(children.size());
+ for (CollectorResult child : children) {
+ child.writeTo(out);
+ }
+ }
+
+ @Override
+ public Object readFrom(StreamInput in) throws IOException {
+ return new CollectorResult(in);
+ }
+}
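For illustration only (not part of the patch): assembling and inspecting a small collector tree. Times are stored in nanoseconds and rendered as milliseconds by toXContent(); the names and timings below are made up:

    CollectorResult leaf = new CollectorResult("TotalHitCountCollector",
            CollectorResult.REASON_SEARCH_COUNT, 120_000L, new ArrayList<CollectorResult>());
    CollectorResult root = new CollectorResult("MultiCollector",
            CollectorResult.REASON_SEARCH_MULTI, 450_000L, Collections.singletonList(leaf));
    assert "TotalHitCountCollector".equals(root.getProfiledChildren().get(0).getName());
    // root.getTime() is inclusive of its children's time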
diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java
new file mode 100644
index 0000000000..132731f37c
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.LeafCollector;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * This class wraps a Lucene Collector and times the execution of:
+ * - setScorer()
+ * - collect()
+ * - doSetNextReader()
+ * - needsScores()
+ *
+ * InternalProfileCollector facilitates the linking of the Collector graph
+ */
+public class InternalProfileCollector implements Collector {
+
+ /**
+ * A more friendly representation of the Collector's class name
+ */
+ private final String collectorName;
+
+ /**
+ * A "hint" to help provide some context about this Collector
+ */
+ private final String reason;
+
+ /** The wrapped collector */
+ private final ProfileCollector collector;
+
+ /**
+ * A list of "embedded" children collectors
+ */
+ private final List<InternalProfileCollector> children;
+
+ public InternalProfileCollector(Collector collector, String reason, List<InternalProfileCollector> children) {
+ this.collector = new ProfileCollector(collector);
+ this.reason = reason;
+ this.collectorName = deriveCollectorName(collector);
+ this.children = children;
+ }
+
+ /**
+ * @return the profiled time for this collector (inclusive of children)
+ */
+ public long getTime() {
+ return collector.getTime();
+ }
+
+ /**
+ * @return a human readable "hint" about what this collector was used for
+ */
+ public String getReason() {
+ return this.reason;
+ }
+
+ /**
+ * @return the lucene class name of the collector
+ */
+ public String getName() {
+ return this.collectorName;
+ }
+
+ /**
+ * Creates a human-friendly representation of the Collector name.
+ *
+ * Bucket Collectors use the aggregation name in their toString() method,
+ * which makes the profiled output a bit nicer.
+ *
+ * @param c The Collector to derive a name from
+ * @return A (hopefully) prettier name
+ */
+ private String deriveCollectorName(Collector c) {
+ String s = c.getClass().getSimpleName();
+
+ // MultiCollector, which wraps multiple BucketCollectors, is generated
+ // via an anonymous class, so this corrects the lack of a name by
+ // asking the enclosingClass
+ if (s.equals("")) {
+ s = c.getClass().getEnclosingClass().getSimpleName();
+ }
+
+ // Aggregation collector toString()'s include the user-defined agg name
+ if (reason.equals(CollectorResult.REASON_AGGREGATION) || reason.equals(CollectorResult.REASON_AGGREGATION_GLOBAL)) {
+ s += ": [" + c.toString() + "]";
+ }
+ return s;
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ return collector.getLeafCollector(context);
+ }
+
+ @Override
+ public boolean needsScores() {
+ return collector.needsScores();
+ }
+
+ public CollectorResult getCollectorTree() {
+ return InternalProfileCollector.doGetCollectorTree(this);
+ }
+
+ private static CollectorResult doGetCollectorTree(InternalProfileCollector collector) {
+ List<CollectorResult> childResults = new ArrayList<>(collector.children.size());
+ for (InternalProfileCollector child : collector.children) {
+ CollectorResult result = doGetCollectorTree(child);
+ childResults.add(result);
+ }
+ return new CollectorResult(collector.getName(), collector.getReason(), collector.getTime(), childResults);
+ }
+}
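deriveCollectorName() relies on a quirk of java.lang.Class: anonymous classes report an empty simple name, so the enclosing class is used as a readable fallback. A minimal, self-contained sketch of that fallback (NameFallbackDemo is a hypothetical name):

public class NameFallbackDemo {
    public static void main(String[] args) {
        // An anonymous class, like the MultiCollector wrapper mentioned above.
        Runnable anonymous = new Runnable() {
            @Override
            public void run() {}
        };
        String s = anonymous.getClass().getSimpleName();
        if (s.isEmpty()) {
            // Anonymous classes have no simple name; fall back to the
            // class they were declared in.
            s = anonymous.getClass().getEnclosingClass().getSimpleName();
        }
        System.out.println(s); // prints "NameFallbackDemo"
    }
}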
diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java
new file mode 100644
index 0000000000..e6052ff509
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.stream.Collectors;
+
+/**
+ * A container class to hold all the profile results across all shards. Internally
+ * holds a map of shard ID -&gt; Profiled results
+ */
+public final class InternalProfileShardResults implements Writeable<InternalProfileShardResults>, ToXContent {
+
+ private Map<String, List<ProfileShardResult>> shardResults;
+
+ public InternalProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) {
+ Map<String, List<ProfileShardResult>> transformed =
+ shardResults.entrySet()
+ .stream()
+ .collect(Collectors.toMap(
+ Map.Entry::getKey,
+ e -> Collections.unmodifiableList(e.getValue()))
+ );
+ this.shardResults = Collections.unmodifiableMap(transformed);
+ }
+
+ public InternalProfileShardResults(StreamInput in) throws IOException {
+ int size = in.readInt();
+ shardResults = new HashMap<>(size);
+
+ for (int i = 0; i < size; i++) {
+ String key = in.readString();
+ int shardResultsSize = in.readInt();
+
+ List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize);
+
+ for (int j = 0; j < shardResultsSize; j++) {
+ ProfileShardResult result = new ProfileShardResult(in);
+ shardResult.add(result);
+ }
+ shardResults.put(key, shardResult);
+ }
+ }
+
+ public Map<String, List<ProfileShardResult>> getShardResults() {
+ return this.shardResults;
+ }
+
+ @Override
+ public InternalProfileShardResults readFrom(StreamInput in) throws IOException {
+ return new InternalProfileShardResults(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeInt(shardResults.size());
+ for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeInt(entry.getValue().size());
+
+ for (ProfileShardResult result : entry.getValue()) {
+ result.writeTo(out);
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject("profile").startArray("shards");
+
+ for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
+ builder.startObject().field("id",entry.getKey()).startArray("searches");
+ for (ProfileShardResult result : entry.getValue()) {
+ builder.startObject();
+ result.toXContent(builder, params);
+ builder.endObject();
+ }
+ builder.endArray().endObject();
+ }
+
+ builder.endArray().endObject();
+ return builder;
+ }
+}
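The constructor above freezes the results at both levels: each per-shard list is wrapped with Collections.unmodifiableList inside the stream, then the whole map is wrapped with Collections.unmodifiableMap. A minimal sketch of the idiom with plain JDK types (UnmodifiableDemo is a hypothetical name):

import java.util.*;
import java.util.stream.Collectors;

public class UnmodifiableDemo {
    public static void main(String[] args) {
        Map<String, List<Integer>> raw = new HashMap<>();
        raw.put("shard-0", new ArrayList<>(Arrays.asList(1, 2, 3)));

        // Wrap each value list, then wrap the map itself, so neither
        // level can be mutated by callers.
        Map<String, List<Integer>> frozen = Collections.unmodifiableMap(
            raw.entrySet().stream().collect(Collectors.toMap(
                Map.Entry::getKey,
                e -> Collections.unmodifiableList(e.getValue()))));

        // Both of these would now throw UnsupportedOperationException:
        // frozen.put("shard-1", new ArrayList<>());
        // frozen.get("shard-0").add(4);
        System.out.println(frozen);
    }
}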
diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java
new file mode 100644
index 0000000000..4bc8a85a78
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.search.Query;
+
+import java.util.*;
+import java.util.concurrent.LinkedBlockingDeque;
+
+/**
+ * This class tracks the dependency tree for queries (scoring and rewriting) and
+ * generates {@link ProfileBreakdown} for each node in the tree. It also finalizes the tree
+ * and returns a list of {@link ProfileResult} that can be serialized back to the client
+ */
+final class InternalProfileTree {
+
+ private ArrayList<ProfileBreakdown> timings;
+
+ /** Maps the Query to its list of children. This is basically the dependency tree */
+ private ArrayList<ArrayList<Integer>> tree;
+
+ /** A list of the original queries, keyed by index position */
+ private ArrayList<Query> queries;
+
+ /** A list of top-level "roots". Each root can have its own tree of profiles */
+ private ArrayList<Integer> roots;
+
+ /** Rewrite time */
+ private long rewriteTime;
+ private long rewriteScratch;
+
+ /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */
+ private Deque<Integer> stack;
+
+ private int currentToken = 0;
+
+ public InternalProfileTree() {
+ timings = new ArrayList<>(10);
+ stack = new LinkedBlockingDeque<>(); // unbounded: query trees can nest deeper than any fixed capacity
+ tree = new ArrayList<>(10);
+ queries = new ArrayList<>(10);
+ roots = new ArrayList<>(10);
+ }
+
+ /**
+ * Returns a {@link ProfileBreakdown} for a scoring query. Scoring queries (e.g. those
+ * that are past the rewrite phase and are now being wrapped by createWeight()) follow
+ * a recursive progression. We can track the dependency tree by a simple stack
+ *
+ * The only hiccup is that the first scoring query will be identical to the last rewritten
+ * query, so we need to take special care to fix that
+ *
+ * @param query The scoring query we wish to profile
+ * @return A ProfileBreakdown for this query
+ */
+ public ProfileBreakdown getQueryBreakdown(Query query) {
+ int token = currentToken;
+
+ boolean stackEmpty = stack.isEmpty();
+
+ // If the stack is empty, we are a new root query
+ if (stackEmpty) {
+
+ // We couldn't find a rewritten query to attach to, so just add it as a
+ // top-level root. This is just a precaution: it really shouldn't happen.
+ // We would only get here if we have a top-level query that, for some reason, never rewrites.
+ roots.add(token);
+
+ // Increment the token since we are adding a new node, but notably, do not
+ // updateParent() because this was added as a root
+ currentToken += 1;
+ stack.add(token);
+
+ return addDependencyNode(query, token);
+ }
+
+ updateParent(token);
+
+ // Increment the token since we are adding a new node
+ currentToken += 1;
+ stack.add(token);
+
+ return addDependencyNode(query, token);
+ }
+
+ /**
+ * Begin timing the rewrite phase of a request
+ */
+ public void startRewriteTime() {
+ assert rewriteScratch == 0;
+ rewriteScratch = System.nanoTime();
+ }
+
+ /**
+ * Halt the timing process and add the elapsed rewriting time.
+ * startRewriteTime() must be called for a particular context prior to calling
+ * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and
+ * nonsensical
+ *
+ * @return The elapsed time
+ */
+ public long stopAndAddRewriteTime() {
+ long time = Math.max(1, System.nanoTime() - rewriteScratch);
+ rewriteTime += time;
+ rewriteScratch = 0;
+ return time;
+ }
+
+ /**
+ * Helper method to add a new node to the dependency tree.
+ *
+ * Initializes a new list in the dependency tree, saves the query and
+ * generates a new {@link ProfileBreakdown} to track the timings
+ * of this query
+ *
+ * @param query The query to profile
+ * @param token The assigned token for this query
+ * @return A ProfileBreakdown to profile this query
+ */
+ private ProfileBreakdown addDependencyNode(Query query, int token) {
+
+ // Add a new slot in the dependency tree
+ tree.add(new ArrayList<>(5));
+
+ // Save our query for lookup later
+ queries.add(query);
+
+ ProfileBreakdown queryTimings = new ProfileBreakdown();
+ timings.add(token, queryTimings);
+ return queryTimings;
+ }
+
+ /**
+ * Removes the last (i.e. most recent) value on the stack
+ */
+ public void pollLast() {
+ stack.pollLast();
+ }
+
+ /**
+ * After the query has been run and profiled, we need to merge the flat timing map
+ * with the dependency graph to build a data structure that mirrors the original
+ * query tree
+ *
+ * @return a hierarchical representation of the profiled query tree
+ */
+ public List<ProfileResult> getQueryTree() {
+ ArrayList<ProfileResult> results = new ArrayList<>(5);
+ for (Integer root : roots) {
+ results.add(doGetQueryTree(root));
+ }
+ return results;
+ }
+
+ /**
+ * Recursive helper to finalize a node in the dependency tree
+ * @param token The node we are currently finalizing
+ * @return A hierarchical representation of the tree inclusive of children at this level
+ */
+ private ProfileResult doGetQueryTree(int token) {
+ Query query = queries.get(token);
+ ProfileBreakdown breakdown = timings.get(token);
+ Map<String, Long> timingMap = breakdown.toTimingMap();
+ List<Integer> children = tree.get(token);
+ List<ProfileResult> childrenProfileResults = Collections.emptyList();
+
+ if (children != null) {
+ childrenProfileResults = new ArrayList<>(children.size());
+ for (Integer child : children) {
+ ProfileResult childNode = doGetQueryTree(child);
+ childrenProfileResults.add(childNode);
+ }
+ }
+
+ // TODO this would be better done bottom-up instead of top-down to avoid
+ // calculating the same times over and over... but is it worth the effort?
+ long nodeTime = getNodeTime(timingMap, childrenProfileResults);
+ String queryDescription = query.getClass().getSimpleName();
+ String luceneName = query.toString();
+ return new ProfileResult(queryDescription, luceneName, timingMap, childrenProfileResults, nodeTime);
+ }
+
+ public long getRewriteTime() {
+ return rewriteTime;
+ }
+
+ /**
+ * Internal helper to add a child to the current parent node
+ *
+ * @param childToken The child to add to the current parent
+ */
+ private void updateParent(int childToken) {
+ Integer parent = stack.peekLast();
+ ArrayList<Integer> parentNode = tree.get(parent);
+ parentNode.add(childToken);
+ tree.set(parent, parentNode);
+ }
+
+ /**
+ * Internal helper to calculate the time of a node, inclusive of children
+ *
+ * @param timings A map of breakdown timing for the node
+ * @param children All children profile results at this node
+ * @return The total time at this node, inclusive of children
+ */
+ private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
+ long nodeTime = 0;
+ for (long time : timings.values()) {
+ nodeTime += time;
+ }
+
+ // Then add up our children
+ for (ProfileResult child : children) {
+ nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
+ }
+ return nodeTime;
+ }
+}
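The core bookkeeping here is a token counter plus a stack: each new scoring query is attached as a child of whatever query is currently on top of the stack, then pushed; the caller pops when that query's work completes. A stripped-down, self-contained sketch of that mechanism follows (DependencyTreeDemo and its method names are hypothetical, and it omits the rewrite handling of the real class):

import java.util.*;

public class DependencyTreeDemo {
    private final List<List<Integer>> tree = new ArrayList<>();
    private final List<Integer> roots = new ArrayList<>();
    private final Deque<Integer> stack = new ArrayDeque<>();
    private int currentToken = 0;

    public int enter() {
        int token = currentToken++;
        tree.add(new ArrayList<Integer>());
        if (stack.isEmpty()) {
            roots.add(token);                       // no parent: a new top-level root
        } else {
            tree.get(stack.peekLast()).add(token);  // attach to the current parent
        }
        stack.addLast(token);
        return token;
    }

    public void leave() {
        stack.pollLast();
    }

    public static void main(String[] args) {
        DependencyTreeDemo d = new DependencyTreeDemo();
        int root = d.enter();   // e.g. a BooleanQuery
        d.enter();              // e.g. a nested TermQuery
        d.leave();
        d.leave();
        System.out.println(d.tree.get(root)); // prints "[1]": the child attached to the root
    }
}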
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java
new file mode 100644
index 0000000000..55ad77b693
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * A record of timings for the various operations that may happen during query execution.
+ * A node's time may be composed of several internal attributes (rewriting, weighting,
+ * scoring, etc).
+ */
+public final class ProfileBreakdown {
+
+ /** Enumeration of all supported timing types. */
+ public enum TimingType {
+ CREATE_WEIGHT,
+ BUILD_SCORER,
+ NEXT_DOC,
+ ADVANCE,
+ MATCH,
+ SCORE;
+
+ @Override
+ public String toString() {
+ return name().toLowerCase(Locale.ROOT);
+ }
+ }
+
+ /**
+ * The accumulated timings for this query node
+ */
+ private final long[] timings;
+
+ /** Scratch to store the current timing type. */
+ private TimingType currentTimingType;
+
+ /**
+ * The temporary scratch space for holding start-times
+ */
+ private long scratch;
+
+ /** Sole constructor. */
+ public ProfileBreakdown() {
+ timings = new long[TimingType.values().length];
+ }
+
+ /**
+ * Begin timing a query for a specific Timing context
+ * @param timing The timing context being profiled
+ */
+ public void startTime(TimingType timing) {
+ assert currentTimingType == null;
+ assert scratch == 0;
+ currentTimingType = timing;
+ scratch = System.nanoTime();
+ }
+
+ /**
+ * Halt the timing process and save the elapsed time.
+ * startTime() must be called for a particular context prior to calling
+ * stopAndRecordTime(), otherwise the elapsed time will be negative and
+ * nonsensical
+ *
+ * @return The elapsed time
+ */
+ public long stopAndRecordTime() {
+ long time = Math.max(1, System.nanoTime() - scratch);
+ timings[currentTimingType.ordinal()] += time;
+ currentTimingType = null;
+ scratch = 0L;
+ return time;
+ }
+
+ /** Convert this record to a map from {@link TimingType} to times. */
+ public Map<String, Long> toTimingMap() {
+ Map<String, Long> map = new HashMap<>();
+ for (TimingType timingType : TimingType.values()) {
+ map.put(timingType.toString(), timings[timingType.ordinal()]);
+ }
+ return Collections.unmodifiableMap(map);
+ }
+
+ /**
+ * Add <code>other</code>'s timings into this breakdown
+ * @param other Another Breakdown to merge with this one
+ */
+ public void merge(ProfileBreakdown other) {
+ assert(timings.length == other.timings.length);
+ for (int i = 0; i < timings.length; ++i) {
+ timings[i] += other.timings[i];
+ }
+ }
+}
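ProfileBreakdown's trick is to index a plain long[] by enum ordinal, so recording a timing is a single array write and no map allocation happens on the hot path; the Math.max(1, ...) floor guarantees a recorded interval is never zero. A minimal sketch under those assumptions (BreakdownDemo is a hypothetical name):

import java.util.Locale;

public class BreakdownDemo {
    enum TimingType { NEXT_DOC, SCORE }

    // One accumulator slot per timing type, addressed by ordinal.
    private final long[] timings = new long[TimingType.values().length];
    private TimingType current;
    private long scratch;

    public void startTime(TimingType t) {
        current = t;
        scratch = System.nanoTime();
    }

    public void stopAndRecordTime() {
        // Floor of 1ns so a recorded interval is never zero.
        timings[current.ordinal()] += Math.max(1, System.nanoTime() - scratch);
        current = null;
        scratch = 0;
    }

    public static void main(String[] args) throws InterruptedException {
        BreakdownDemo b = new BreakdownDemo();
        b.startTime(TimingType.SCORE);
        Thread.sleep(1);
        b.stopAndRecordTime();
        System.out.println(String.format(Locale.ROOT, "score: %dns",
                b.timings[TimingType.SCORE.ordinal()]));
    }
}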
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java
new file mode 100644
index 0000000000..7d7538c911
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FilterCollector;
+import org.apache.lucene.search.FilterLeafCollector;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Scorer;
+
+import java.io.IOException;
+
+/** A collector that profiles how much time is spent calling it. */
+final class ProfileCollector extends FilterCollector {
+
+ private long time;
+
+ /** Sole constructor. */
+ public ProfileCollector(Collector in) {
+ super(in);
+ }
+
+ /** Return the wrapped collector. */
+ public Collector getDelegate() {
+ return in;
+ }
+
+ @Override
+ public boolean needsScores() {
+ final long start = System.nanoTime();
+ try {
+ return super.needsScores();
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ }
+
+ @Override
+ public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
+ final long start = System.nanoTime();
+ final LeafCollector inLeafCollector;
+ try {
+ inLeafCollector = super.getLeafCollector(context);
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ return new FilterLeafCollector(inLeafCollector) {
+
+ @Override
+ public void collect(int doc) throws IOException {
+ final long start = System.nanoTime();
+ try {
+ super.collect(doc);
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ }
+
+ @Override
+ public void setScorer(Scorer scorer) throws IOException {
+ final long start = System.nanoTime();
+ try {
+ super.setScorer(scorer);
+ } finally {
+ time += Math.max(1, System.nanoTime() - start);
+ }
+ }
+ };
+ }
+
+ /** Return the total time spent on this collector. */
+ public long getTime() {
+ return time;
+ }
+
+}
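Every override above follows the same decorator idiom: take System.nanoTime() before delegating and accumulate the difference in a finally block, so the time is charged even when the delegate throws. A generic, self-contained sketch of that idiom (TimedRunner is a hypothetical name):

public class TimedRunner {
    private long time;

    // Time any delegate call; the finally block runs whether the call
    // returns normally or throws, so no interval is ever lost.
    public <T> T call(java.util.concurrent.Callable<T> delegate) throws Exception {
        final long start = System.nanoTime();
        try {
            return delegate.call();
        } finally {
            time += Math.max(1, System.nanoTime() - start);
        }
    }

    public long getTime() {
        return time;
    }

    public static void main(String[] args) throws Exception {
        TimedRunner r = new TimedRunner();
        r.call(() -> "hello");
        System.out.println(r.getTime() + "ns");
    }
}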
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
new file mode 100644
index 0000000000..4c8752fdaf
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
+/**
+ * This class is the internal representation of a profiled Query, corresponding
+ * to a single node in the query tree. It is built after the query has finished executing
+ * and is merely a structured representation, rather than the entity that collects the timing
+ * profile (see InternalProfiler for that)
+ *
+ * Each InternalProfileResult has a List of InternalProfileResults, which will contain
+ * "children" queries if applicable
+ */
+final class ProfileResult implements Writeable<ProfileResult>, ToXContent {
+
+ private static final ParseField QUERY_TYPE = new ParseField("query_type");
+ private static final ParseField LUCENE_DESCRIPTION = new ParseField("lucene");
+ private static final ParseField NODE_TIME = new ParseField("time");
+ private static final ParseField CHILDREN = new ParseField("children");
+ private static final ParseField BREAKDOWN = new ParseField("breakdown");
+
+ private final String queryType;
+ private final String luceneDescription;
+ private final Map<String, Long> timings;
+ private final long nodeTime;
+ private final List<ProfileResult> children;
+
+ public ProfileResult(String queryType, String luceneDescription, Map<String, Long> timings, List<ProfileResult> children, long nodeTime) {
+ this.queryType = queryType;
+ this.luceneDescription = luceneDescription;
+ this.timings = timings;
+ this.children = children;
+ this.nodeTime = nodeTime;
+ }
+
+ public ProfileResult(StreamInput in) throws IOException {
+ this.queryType = in.readString();
+ this.luceneDescription = in.readString();
+ this.nodeTime = in.readLong();
+
+ int timingsSize = in.readVInt();
+ this.timings = new HashMap<>(timingsSize);
+ for (int i = 0; i < timingsSize; ++i) {
+ timings.put(in.readString(), in.readLong());
+ }
+
+ int size = in.readVInt();
+ this.children = new ArrayList<>(size);
+
+ for (int i = 0; i < size; i++) {
+ children.add(new ProfileResult(in));
+ }
+ }
+
+ /**
+ * Retrieve the lucene description of this query (e.g. the "explain" text)
+ */
+ public String getLuceneDescription() {
+ return luceneDescription;
+ }
+
+ /**
+ * Retrieve the name of the query (e.g. "TermQuery")
+ */
+ public String getQueryName() {
+ return queryType;
+ }
+
+ /**
+ * Returns the timing breakdown for this particular query node
+ */
+ public Map<String, Long> getTimeBreakdown() {
+ return Collections.unmodifiableMap(timings);
+ }
+
+ /**
+ * Returns the total time (inclusive of children) for this query node.
+ *
+ * @return elapsed time in nanoseconds
+ */
+ public long getTime() {
+ return nodeTime;
+ }
+
+ /**
+ * Returns a list of all profiled children queries
+ */
+ public List<ProfileResult> getProfiledChildren() {
+ return Collections.unmodifiableList(children);
+ }
+
+ @Override
+ public ProfileResult readFrom(StreamInput in) throws IOException {
+ return new ProfileResult(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(queryType);
+ out.writeString(luceneDescription);
+ out.writeLong(nodeTime); // not a VLong because it can be negative
+ out.writeVInt(timings.size());
+ for (Map.Entry<String, Long> entry : timings.entrySet()) {
+ out.writeString(entry.getKey());
+ out.writeLong(entry.getValue());
+ }
+ out.writeVInt(children.size());
+ for (ProfileResult child : children) {
+ child.writeTo(out);
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder = builder.startObject()
+ .field(QUERY_TYPE.getPreferredName(), queryType)
+ .field(LUCENE_DESCRIPTION.getPreferredName(), luceneDescription)
+ .field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double)(getTime() / 1000000.0)))
+ .field(BREAKDOWN.getPreferredName(), timings);
+
+ if (!children.isEmpty()) {
+ builder = builder.startArray(CHILDREN.getPreferredName());
+ for (ProfileResult child : children) {
+ builder = child.toXContent(builder, params);
+ }
+ builder = builder.endArray();
+ }
+
+ builder = builder.endObject();
+ return builder;
+ }
+
+}
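Both toXContent() implementations in this package render nanoseconds as a millisecond string via String.format with %.10g, i.e. ten significant digits, and Locale.US to pin the decimal separator. A tiny sketch of just that conversion (TimeFormatDemo is a hypothetical name):

import java.util.Locale;

public class TimeFormatDemo {
    public static void main(String[] args) {
        long nanos = 1_234_567L;
        // Divide by 1e6 to get milliseconds, keep 10 significant digits.
        String ms = String.format(Locale.US, "%.10gms", nanos / 1000000.0);
        System.out.println(ms); // prints "1.234567000ms"
    }
}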
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java
new file mode 100644
index 0000000000..b0dc6f2cd4
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
+
+import java.io.IOException;
+import java.util.Collection;
+
+/**
+ * {@link Scorer} wrapper that will compute how much time is spent on moving
+ * the iterator, confirming matches and computing scores.
+ */
+final class ProfileScorer extends Scorer {
+
+ private final Scorer scorer;
+ private final ProfileWeight profileWeight;
+ private final ProfileBreakdown profile;
+
+ ProfileScorer(ProfileWeight w, Scorer scorer, ProfileBreakdown profile) throws IOException {
+ super(w);
+ this.scorer = scorer;
+ this.profileWeight = w;
+ this.profile = profile;
+ }
+
+ @Override
+ public int docID() {
+ return scorer.docID();
+ }
+
+ @Override
+ public int advance(int target) throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.ADVANCE);
+ try {
+ return scorer.advance(target);
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
+ try {
+ return scorer.nextDoc();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public float score() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.SCORE);
+ try {
+ return scorer.score();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return scorer.freq();
+ }
+
+ @Override
+ public long cost() {
+ return scorer.cost();
+ }
+
+ @Override
+ public Weight getWeight() {
+ return profileWeight;
+ }
+
+ @Override
+ public Collection<ChildScorer> getChildren() {
+ return scorer.getChildren();
+ }
+
+ @Override
+ public TwoPhaseIterator asTwoPhaseIterator() {
+ final TwoPhaseIterator in = scorer.asTwoPhaseIterator();
+ if (in == null) {
+ return null;
+ }
+ final DocIdSetIterator inApproximation = in.approximation();
+ final DocIdSetIterator approximation = new DocIdSetIterator() {
+
+ @Override
+ public int advance(int target) throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.ADVANCE);
+ try {
+ return inApproximation.advance(target);
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
+ try {
+ return inApproximation.nextDoc();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public int docID() {
+ return inApproximation.docID();
+ }
+
+ @Override
+ public long cost() {
+ return inApproximation.cost();
+ }
+ };
+ return new TwoPhaseIterator(approximation) {
+ @Override
+ public boolean matches() throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.MATCH);
+ try {
+ return in.matches();
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ }
+
+ @Override
+ public float matchCost() {
+ return in.matchCost();
+ }
+ };
+ }
+}
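asTwoPhaseIterator() wraps both halves of Lucene's two-phase protocol separately: the cheap approximation is timed as ADVANCE/NEXT_DOC while the expensive matches() confirmation is timed as MATCH. A schematic, Lucene-free sketch of the two-phase idea (TwoPhaseDemo and the odd-number check are hypothetical stand-ins for a real approximation and confirmation):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TwoPhaseDemo {
    public static void main(String[] args) {
        // Phase 1: a cheap approximation proposes candidate docs.
        List<Integer> approximation = Arrays.asList(1, 4, 7, 9);
        List<Integer> confirmed = new ArrayList<>();
        long matchNanos = 0;
        for (int doc : approximation) {
            // Phase 2: the costly confirmation, timed like TimingType.MATCH.
            long start = System.nanoTime();
            boolean matches = doc % 2 == 1; // stand-in for a real positional check
            matchNanos += Math.max(1, System.nanoTime() - start);
            if (matches) {
                confirmed.add(doc);
            }
        }
        System.out.println(confirmed + " confirmed in " + matchNanos + "ns");
    }
}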
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
new file mode 100644
index 0000000000..6e005babb3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * A container class to hold the profile results for a single shard in the request.
+ * Contains a list of query profiles, a collector tree and a total rewrite tree.
+ */
+public final class ProfileShardResult implements Writeable<ProfileShardResult>, ToXContent {
+
+ private final List<ProfileResult> profileResults;
+
+ private final CollectorResult profileCollector;
+
+ private final long rewriteTime;
+
+ public ProfileShardResult(List<ProfileResult> profileResults, long rewriteTime,
+ CollectorResult profileCollector) {
+ assert(profileCollector != null);
+ this.profileResults = profileResults;
+ this.profileCollector = profileCollector;
+ this.rewriteTime = rewriteTime;
+ }
+
+ public ProfileShardResult(StreamInput in) throws IOException {
+ int profileSize = in.readVInt();
+ profileResults = new ArrayList<>(profileSize);
+ for (int j = 0; j < profileSize; j++) {
+ profileResults.add(new ProfileResult(in));
+ }
+
+ profileCollector = new CollectorResult(in);
+ rewriteTime = in.readLong();
+ }
+
+ public List<ProfileResult> getQueryResults() {
+ return Collections.unmodifiableList(profileResults);
+ }
+
+ public long getRewriteTime() {
+ return rewriteTime;
+ }
+
+ public CollectorResult getCollectorResult() {
+ return profileCollector;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startArray("query");
+ for (ProfileResult p : profileResults) {
+ p.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.field("rewrite_time", rewriteTime);
+ builder.startArray("collector");
+ profileCollector.toXContent(builder, params);
+ builder.endArray();
+ return builder;
+ }
+
+ @Override
+ public ProfileShardResult readFrom(StreamInput in) throws IOException {
+ return new ProfileShardResult(in);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVInt(profileResults.size());
+ for (ProfileResult p : profileResults) {
+ p.writeTo(out);
+ }
+ profileCollector.writeTo(out);
+ out.writeLong(rewriteTime);
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java
new file mode 100644
index 0000000000..1ce5cd721f
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BulkScorer;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * Weight wrapper that will compute how much time it takes to build the
+ * {@link Scorer} and then return a {@link Scorer} that is wrapped in
+ * order to compute timings as well.
+ */
+public final class ProfileWeight extends Weight {
+
+ private final Weight subQueryWeight;
+ private final ProfileBreakdown profile;
+
+ public ProfileWeight(Query query, Weight subQueryWeight, ProfileBreakdown profile) throws IOException {
+ super(query);
+ this.subQueryWeight = subQueryWeight;
+ this.profile = profile;
+ }
+
+ @Override
+ public Scorer scorer(LeafReaderContext context) throws IOException {
+ profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER);
+ final Scorer subQueryScorer;
+ try {
+ subQueryScorer = subQueryWeight.scorer(context);
+ } finally {
+ profile.stopAndRecordTime();
+ }
+ if (subQueryScorer == null) {
+ return null;
+ }
+
+ return new ProfileScorer(this, subQueryScorer, profile);
+ }
+
+ @Override
+ public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
+ // We use the default bulk scorer instead of the specialized one. The reason
+ // is that Lucene's BulkScorers do everything at once: finding matches,
+ // scoring them and calling the collector, so they make it impossible to
+ // see where time is spent, which is the purpose of query profiling.
+ // The default bulk scorer will pull a scorer and iterate over matches;
+ // this might be a significantly different execution path for some queries
+ // like disjunctions, but in general it is what would be done anyway.
+ return super.bulkScorer(context);
+ }
+
+ @Override
+ public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+ return subQueryWeight.explain(context, doc);
+ }
+
+ @Override
+ public float getValueForNormalization() throws IOException {
+ return subQueryWeight.getValueForNormalization();
+ }
+
+ @Override
+ public void normalize(float norm, float topLevelBoost) {
+ subQueryWeight.normalize(norm, topLevelBoost);
+ }
+
+ @Override
+ public void extractTerms(Set<Term> set) {
+ subQueryWeight.extractTerms(set);
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profiler.java b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java
new file mode 100644
index 0000000000..bf0c9ec01b
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.search.Query;
+
+import java.util.*;
+
+/**
+ * This class acts as a thread-local storage for profiling a query. It also
+ * builds a representation of the query tree which is constructed
+ * "online" as the weights are wrapped by ContextIndexSearcher. This allows us
+ * to know the relationship between nodes in the tree without explicitly
+ * walking the tree or pre-wrapping everything.
+ *
+ * A Profiler is associated with every search, not with every search request:
+ * e.g. a request may execute two searches (query + global agg), and a Profiler
+ * represents just one of them.
+ */
+public final class Profiler {
+
+ private final InternalProfileTree queryTree = new InternalProfileTree();
+
+ /**
+ * The root Collector used in the search
+ */
+ private InternalProfileCollector collector;
+
+ public Profiler() {}
+
+ /** Set the collector that is associated with this profiler. */
+ public void setCollector(InternalProfileCollector collector) {
+ if (this.collector != null) {
+ throw new IllegalStateException("The collector can only be set once.");
+ }
+ this.collector = Objects.requireNonNull(collector);
+ }
+
+ /**
+ * Get the {@link ProfileBreakdown} for the given query, potentially creating it if it did not exist.
+ * This should only be used for queries that will be undergoing scoring. Do not use it to profile the
+ * rewriting phase
+ */
+ public ProfileBreakdown getQueryBreakdown(Query query) {
+ return queryTree.getQueryBreakdown(query);
+ }
+
+ /**
+ * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a
+ * single metric
+ */
+ public void startRewriteTime() {
+ queryTree.startRewriteTime();
+ }
+
+ /**
+ * Stop recording the current rewrite and add its time to the total tally,
+ * returning the elapsed time of that rewrite (not the cumulative total).
+ *
+ * @return elapsed time of the most recent rewrite
+ */
+ public long stopAndAddRewriteTime() {
+ return queryTree.stopAndAddRewriteTime();
+ }
+
+ /**
+ * Removes the last (i.e. most recent) query on the stack. This should only be called for scoring
+ * queries, not rewritten queries
+ */
+ public void pollLastQuery() {
+ queryTree.pollLast();
+ }
+
+ /**
+ * @return a hierarchical representation of the profiled query tree
+ */
+ public List<ProfileResult> getQueryTree() {
+ return queryTree.getQueryTree();
+ }
+
+ /**
+ * @return total time taken to rewrite all queries in this profile
+ */
+ public long getRewriteTime() {
+ return queryTree.getRewriteTime();
+ }
+
+ /**
+ * Return the profiled tree for the root Collector used in this search
+ */
+ public CollectorResult getCollector() {
+ return collector.getCollectorTree();
+ }
+
+ /**
+ * Helper method to convert a list of Profilers into ProfileShardResults, which
+ * can be serialized to other nodes, emitted as JSON, etc.
+ *
+ * @param profilers A list of Profilers to convert
+ * @return A list of corresponding ProfileShardResults
+ */
+ public static List<ProfileShardResult> buildShardResults(List<Profiler> profilers) {
+ List<ProfileShardResult> results = new ArrayList<>(profilers.size());
+ for (Profiler profiler : profilers) {
+ ProfileShardResult result = new ProfileShardResult(
+ profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector());
+ results.add(result);
+ }
+ return results;
+ }
+
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
new file mode 100644
index 0000000000..0fb7d9ac1c
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/** Wrapper around several {@link Profiler}s that makes management easier. */
+public final class Profilers {
+
+ private final ContextIndexSearcher searcher;
+ private final List<Profiler> profilers;
+
+ /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */
+ public Profilers(ContextIndexSearcher searcher) {
+ this.searcher = searcher;
+ this.profilers = new ArrayList<>();
+ addProfiler();
+ }
+
+ /** Switch to a new profile. */
+ public Profiler addProfiler() {
+ Profiler profiler = new Profiler();
+ searcher.setProfiler(profiler);
+ profilers.add(profiler);
+ return profiler;
+ }
+
+ /** Get the current profiler. */
+ public Profiler getCurrent() {
+ return profilers.get(profilers.size() - 1);
+ }
+
+ /** Return the list of all created {@link Profiler}s so far. */
+ public List<Profiler> getProfilers() {
+ return Collections.unmodifiableList(profilers);
+ }
+
+}
diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
index ce8836cd33..08ff849871 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java
@@ -52,13 +52,16 @@ import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.*;
import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.sort.SortParseElement;
import org.elasticsearch.search.sort.TrackScoresParseElement;
import org.elasticsearch.search.suggest.SuggestPhase;
+import java.util.AbstractList;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -124,6 +127,11 @@ public class QueryPhase implements SearchPhase {
}
suggestPhase.execute(searchContext);
aggregationPhase.execute(searchContext);
+
+ if (searchContext.getProfilers() != null) {
+ List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+ searchContext.queryResult().profileResults(shardResults);
+ }
}
private static boolean returnsDocsInOrder(Query query, Sort sort) {
@@ -147,6 +155,7 @@ public class QueryPhase implements SearchPhase {
QuerySearchResult queryResult = searchContext.queryResult();
queryResult.searchTimedOut(false);
+ final boolean doProfile = searchContext.getProfilers() != null;
final SearchType searchType = searchContext.searchType();
boolean rescore = false;
try {
@@ -162,9 +171,13 @@ public class QueryPhase implements SearchPhase {
Callable<TopDocs> topDocsCallable;
assert query == searcher.rewrite(query); // already rewritten
+
if (searchContext.size() == 0) { // no matter what the value of from is
final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
collector = totalHitCountCollector;
+ if (searchContext.getProfilers() != null) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList());
+ }
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
@@ -219,6 +232,9 @@ public class QueryPhase implements SearchPhase {
topDocsCollector = TopScoreDocCollector.create(numDocs, lastEmittedDoc);
}
collector = topDocsCollector;
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList());
+ }
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
@@ -254,27 +270,57 @@ public class QueryPhase implements SearchPhase {
final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;
if (terminateAfterSet) {
+ final Collector child = collector;
// throws Lucene.EarlyTerminationException when given count is reached
collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter());
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
if (searchContext.parsedPostFilter() != null) {
+ final Collector child = collector;
// this will only get applied to the actual search collector and not
// to any scoped collectors, also, it will only be applied to the main collector
// since that is where the filter should only work
final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false);
collector = new FilteredCollector(collector, filterWeight);
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
// plug in additional collectors, like aggregations
- List<Collector> allCollectors = new ArrayList<>();
- allCollectors.add(collector);
- allCollectors.addAll(searchContext.queryCollectors().values());
- collector = MultiCollector.wrap(allCollectors);
+ final List<Collector> subCollectors = new ArrayList<>();
+ subCollectors.add(collector);
+ subCollectors.addAll(searchContext.queryCollectors().values());
+ collector = MultiCollector.wrap(subCollectors);
+ if (doProfile && collector instanceof InternalProfileCollector == false) {
+ // When there is a single collector to wrap, MultiCollector returns it
+ // directly, so only wrap in the case that there are several sub collectors
+ final List<InternalProfileCollector> children = new AbstractList<InternalProfileCollector>() {
+ @Override
+ public InternalProfileCollector get(int index) {
+ return (InternalProfileCollector) subCollectors.get(index);
+ }
+ @Override
+ public int size() {
+ return subCollectors.size();
+ }
+ };
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children);
+ }
// apply the minimum score after multi collector so we filter aggs as well
if (searchContext.minimumScore() != null) {
+ final Collector child = collector;
collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
if (collector.getClass() == TotalHitCountCollector.class) {
@@ -319,13 +365,21 @@ public class QueryPhase implements SearchPhase {
final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis();
if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed
+ final Collector child = collector;
// TODO: change to use our own counter that uses the scheduler in ThreadPool
// throws TimeLimitingCollector.TimeExceededException when timeout has reached
collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis());
+ if (doProfile) {
+ collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT,
+ Collections.singletonList((InternalProfileCollector) child));
+ }
}
try {
if (collector != null) {
+ if (doProfile) {
+ searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector);
+ }
searcher.search(query, collector);
}
} catch (TimeLimitingCollector.TimeExceededException e) {
@@ -343,7 +397,13 @@ public class QueryPhase implements SearchPhase {
queryResult.topDocs(topDocsCallable.call());
+ if (searchContext.getProfilers() != null) {
+ List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+ searchContext.queryResult().profileResults(shardResults);
+ }
+
return rescore;
+
} catch (Throwable e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e);
}
diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index 7f8d12a9c9..9223eb5a82 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -20,6 +20,8 @@
package org.elasticsearch.search.query;
import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -29,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
+import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.suggest.Suggest;
import java.io.IOException;
@@ -53,6 +56,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
private Suggest suggest;
private boolean searchTimedOut;
private Boolean terminatedEarly = null;
+ private List<ProfileShardResult> profileShardResults;
public QuerySearchResult() {
@@ -120,6 +124,22 @@ public class QuerySearchResult extends QuerySearchResultProvider {
this.aggregations = aggregations;
}
+ /**
+ * Returns the profiled results for this search, or potentially null if the result was empty
+ * @return The profiled results, or null
+ */
+ public @Nullable List<ProfileShardResult> profileResults() {
+ return profileShardResults;
+ }
+
+ /**
+ * Sets the finalized profiling results for this query
+ * @param shardResults The finalized profile
+ */
+ public void profileResults(List<ProfileShardResult> shardResults) {
+ this.profileShardResults = shardResults;
+ }
+
public List<SiblingPipelineAggregator> pipelineAggregators() {
return pipelineAggregators;
}
@@ -191,6 +211,15 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
searchTimedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
+
+ if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
+ int profileSize = in.readVInt();
+ profileShardResults = new ArrayList<>(profileSize);
+ for (int i = 0; i < profileSize; i++) {
+ ProfileShardResult result = new ProfileShardResult(in);
+ profileShardResults.add(result);
+ }
+ }
}
@Override
@@ -229,5 +258,17 @@ public class QuerySearchResult extends QuerySearchResultProvider {
}
out.writeBoolean(searchTimedOut);
out.writeOptionalBoolean(terminatedEarly);
+
+ if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+ if (profileShardResults == null) {
+ out.writeBoolean(false);
+ } else {
+ out.writeBoolean(true);
+ out.writeVInt(profileShardResults.size());
+ for (ProfileShardResult shardResult : profileShardResults) {
+ shardResult.writeTo(out);
+ }
+ }
+ }
}
}
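The serialization change above uses two guards: a version check so older nodes never see the new bytes, and a presence boolean so a null result costs only one byte. A minimal sketch of the presence-flag half using plain java.io (OptionalListWireDemo is a hypothetical name; the real code additionally wraps this in the Version.V_2_2_0 check):

import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class OptionalListWireDemo {
    static void write(DataOutputStream out, List<Long> results) throws IOException {
        if (results == null) {
            out.writeBoolean(false);       // field absent: one byte on the wire
        } else {
            out.writeBoolean(true);
            out.writeInt(results.size());
            for (long r : results) {
                out.writeLong(r);
            }
        }
    }

    static List<Long> read(DataInputStream in) throws IOException {
        if (!in.readBoolean()) {
            return null;                   // field was absent
        }
        int size = in.readInt();
        List<Long> results = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            results.add(in.readLong());
        }
        return results;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), Arrays.asList(5L, 7L));
        List<Long> back = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(back); // prints "[5, 7]"
    }
}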
diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
index c465eaf6ef..e4fe2c08f7 100644
--- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
+++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
@@ -50,6 +50,7 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
+import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -130,7 +131,7 @@ public class ScriptSortParser implements SortParser {
if (type == null) {
throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation());
}
- final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH);
+ final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) {
throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation());
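
This hunk and the PhraseSuggestParser one below are the same plumbing change: script lookup and compilation now take an explicit compile-time params map, and Collections.emptyMap() preserves the old behaviour. A sketch of the new call shape, assuming a SearchContext and Script in scope:

// Sketch only: the extra argument is a map of compile-time parameters; an
// empty map keeps the previous semantics for callers with nothing to pass.
import java.util.Collections;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.internal.SearchContext;

class ScriptParamsSketch {
    static SearchScript lookup(SearchContext context, Script script) {
        return context.scriptService()
                .search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap());
    }
}
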
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
index 106672ae7a..6a0155ffb7 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java
@@ -30,7 +30,7 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs;
import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector;
import org.apache.lucene.util.*;
import org.apache.lucene.util.PriorityQueue;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.mapper.MappedFieldType;
@@ -57,7 +57,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
}
CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize());
spare.copyUTF8Bytes(suggestionContext.getText());
- CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length());
+ CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new Text(spare.toString()), 0, spare.length());
completionSuggestion.addTerm(completionSuggestEntry);
TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize());
suggest(searcher, suggestionContext.toQuery(), collector);
@@ -91,7 +91,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
}
if (numResult++ < suggestionContext.getSize()) {
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(
- new StringText(suggestDoc.key.toString()), suggestDoc.score, contexts, payload);
+ new Text(suggestDoc.key.toString()), suggestDoc.score, contexts, payload);
completionSuggestEntry.addOption(option);
} else {
break;
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
index 4bbdaf9c49..9b083a9178 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
@@ -39,6 +39,7 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator;
import java.io.IOException;
+import java.util.Collections;
public final class PhraseSuggestParser implements SuggestContextParser {
@@ -143,7 +144,7 @@ public final class PhraseSuggestParser implements SuggestContextParser {
}
Template template = Template.parse(parser, parseFieldMatcher);
CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH,
- headersContext);
+ headersContext, Collections.emptyMap());
suggestion.setCollateQueryScript(compiledScript);
} else if ("params".equals(fieldName)) {
suggestion.setCollateScriptParams(parser.map());
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
index fccf9ebc30..c7fa6fae30 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java
@@ -30,7 +30,6 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.query.ParsedQuery;
@@ -127,11 +126,11 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
if (!collateMatch && !collatePrune) {
continue;
}
- Text phrase = new StringText(spare.toString());
+ Text phrase = new Text(spare.toString());
Text highlighted = null;
if (suggestion.getPreTag() != null) {
spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
- highlighted = new StringText(spare.toString());
+ highlighted = new Text(spare.toString());
}
if (collatePrune) {
resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch));
@@ -147,7 +146,7 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) {
spare.copyUTF8Bytes(suggestion.getText());
- return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length(), cutoffScore);
+ return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore);
}
ScriptService scriptService() {
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
index 4c1b176c99..34cd3ad4d5 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java
@@ -27,8 +27,6 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.text.BytesText;
-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.search.suggest.SuggestContextParser;
import org.elasticsearch.search.suggest.SuggestUtils;
@@ -54,10 +52,10 @@ public final class TermSuggester extends Suggester<TermSuggestionContext> {
SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
);
- Text key = new BytesText(new BytesArray(token.term.bytes()));
+ Text key = new Text(new BytesArray(token.term.bytes()));
TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
for (SuggestWord suggestWord : suggestedWords) {
- Text word = new StringText(suggestWord.string);
+ Text word = new Text(suggestWord.string);
resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
}
response.addTerm(resultEntry);
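
The three suggester diffs above are one mechanical migration: StringText and BytesText are removed in favour of a single concrete Text class constructed directly from a String or a BytesReference. A sketch of the two constructor forms, assuming the unified Text class these hunks target:

// Sketch of the StringText/BytesText -> Text migration; both constructor
// forms are taken from the hunks above.
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.text.Text;

class TextMigrationSketch {
    public static void main(String[] args) {
        // Previously new StringText(...): build directly from a String.
        Text fromString = new Text("suggestion");
        // Previously new BytesText(...): build from a BytesReference.
        Text fromBytes = new Text(new BytesArray("suggestion".getBytes(StandardCharsets.UTF_8)));
        System.out.println(fromString.string() + " / " + fromBytes.string());
    }
}
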
diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index cd710d52cd..14b2680d25 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -33,8 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
-import org.elasticsearch.cluster.settings.DynamicSettings;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -118,18 +117,19 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
private final MetaDataCreateIndexService createIndexService;
- private final DynamicSettings dynamicSettings;
+ private final ClusterSettings dynamicSettings;
private final MetaDataIndexUpgradeService metaDataIndexUpgradeService;
private final CopyOnWriteArrayList<ActionListener<RestoreCompletionResponse>> listeners = new CopyOnWriteArrayList<>();
private final BlockingQueue<UpdateIndexShardRestoreStatusRequest> updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue();
+ private final ClusterSettings clusterSettings;
@Inject
public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService,
- AllocationService allocationService, MetaDataCreateIndexService createIndexService, @ClusterDynamicSettings DynamicSettings dynamicSettings,
- MetaDataIndexUpgradeService metaDataIndexUpgradeService) {
+ AllocationService allocationService, MetaDataCreateIndexService createIndexService, ClusterSettings dynamicSettings,
+ MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) {
super(settings);
this.clusterService = clusterService;
this.repositoriesService = repositoriesService;
@@ -140,6 +140,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
this.metaDataIndexUpgradeService = metaDataIndexUpgradeService;
transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler());
clusterService.add(this);
+ this.clusterSettings = clusterSettings;
}
/**
@@ -389,24 +390,9 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) {
if (request.includeGlobalState()) {
if (metaData.persistentSettings() != null) {
- boolean changed = false;
- Settings.Builder persistentSettings = Settings.settingsBuilder().put();
- for (Map.Entry<String, String> entry : metaData.persistentSettings().getAsMap().entrySet()) {
- if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
- String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
- if (error == null) {
- persistentSettings.put(entry.getKey(), entry.getValue());
- changed = true;
- } else {
- logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
- }
- } else {
- logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
- }
- }
- if (changed) {
- mdBuilder.persistentSettings(persistentSettings.build());
- }
+ Settings settings = metaData.persistentSettings();
+ clusterSettings.dryRun(settings);
+ mdBuilder.persistentSettings(settings);
}
if (metaData.templates() != null) {
// TODO: Should all existing templates be deleted first?
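
The hand-rolled validation loop is replaced by ClusterSettings#dryRun, which is assumed (per this hunk) to run the registered validators over the restored persistent settings without applying them, failing fast on anything invalid. A minimal sketch under that assumption:

// A minimal sketch, assuming ClusterSettings#dryRun throws on settings that
// cannot be applied, which is how the hunk above relies on it.
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

class RestoreSettingsSketch {
    static Settings validateRestored(ClusterSettings clusterSettings, Settings restored) {
        clusterSettings.dryRun(restored); // validate only; nothing is applied
        return restored;                  // safe to hand to the metadata builder
    }
}
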
diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index b0d81279b0..56e02926ed 100644
--- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -20,13 +20,13 @@
package org.elasticsearch.threadpool;
import org.apache.lucene.util.Counter;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.settings.Validator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.SizeValue;
@@ -38,14 +38,11 @@ import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
-import org.elasticsearch.node.settings.NodeSettingsService;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
-import java.util.function.Function;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -172,7 +169,7 @@ public class ThreadPool extends AbstractComponent {
}
}
- public static final String THREADPOOL_GROUP = "threadpool.";
+ public static final Setting<Settings> THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.CLUSTER);
private volatile Map<String, ExecutorHolder> executors;
@@ -184,7 +181,7 @@ public class ThreadPool extends AbstractComponent {
private final EstimatedTimeThread estimatedTimeThread;
- private boolean settingsListenerIsSet = false;
+ private final AtomicBoolean settingsListenerIsSet = new AtomicBoolean(false);
static final Executor DIRECT_EXECUTOR = command -> command.run();
@@ -197,7 +194,8 @@ public class ThreadPool extends AbstractComponent {
assert settings.get("name") != null : "ThreadPool's settings should contain a name";
- Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings);
+ Map<String, Settings> groupSettings = THREADPOOL_GROUP_SETTING.get(settings).getAsGroups();
+ validate(groupSettings);
int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5);
@@ -252,18 +250,12 @@ public class ThreadPool extends AbstractComponent {
this.estimatedTimeThread.start();
}
- private Map<String, Settings> getThreadPoolSettingsGroup(Settings settings) {
- Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP);
- validate(groupSettings);
- return groupSettings;
- }
-
- public void setNodeSettingsService(NodeSettingsService nodeSettingsService) {
- if(settingsListenerIsSet) {
+ public void setClusterSettings(ClusterSettings clusterSettings) {
+ if (settingsListenerIsSet.compareAndSet(false, true)) {
+ clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups()));
+ } else {
throw new IllegalStateException("the node settings listener was set more than once");
}
- nodeSettingsService.addListener(new ApplySettings());
- settingsListenerIsSet = true;
}
public long estimatedTimeInMillis() {
@@ -526,8 +518,8 @@ public class ThreadPool extends AbstractComponent {
throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]");
}
- public void updateSettings(Settings settings) {
- Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings);
+ private void updateSettings(Settings settings) {
+ Map<String, Settings> groupSettings = settings.getAsGroups();
if (groupSettings.isEmpty()) {
return;
}
@@ -583,7 +575,7 @@ public class ThreadPool extends AbstractComponent {
ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key);
// TODO: the type equality check can be removed after #3760/#6732 are addressed
if (type != null && !correctThreadPoolType.getType().equals(type)) {
- throw new IllegalArgumentException("setting " + THREADPOOL_GROUP + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType());
+ throw new IllegalArgumentException("setting " + THREADPOOL_GROUP_SETTING.getKey() + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType());
}
}
}
@@ -866,13 +858,6 @@ public class ThreadPool extends AbstractComponent {
}
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- updateSettings(settings);
- }
- }
-
/**
* Returns <code>true</code> if the given service was terminated successfully. If the termination timed out,
* the service is <code>null</code> this method will return <code>false</code>.
@@ -911,38 +896,4 @@ public class ThreadPool extends AbstractComponent {
}
return false;
}
-
- public static ThreadPoolTypeSettingsValidator THREAD_POOL_TYPE_SETTINGS_VALIDATOR = new ThreadPoolTypeSettingsValidator();
- private static class ThreadPoolTypeSettingsValidator implements Validator {
- @Override
- public String validate(String setting, String value, ClusterState clusterState) {
- // TODO: the type equality validation can be removed after #3760/#6732 are addressed
- Matcher matcher = Pattern.compile("threadpool\\.(.*)\\.type").matcher(setting);
- if (!matcher.matches()) {
- return null;
- } else {
- String threadPool = matcher.group(1);
- ThreadPool.ThreadPoolType defaultThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPool);
- ThreadPool.ThreadPoolType threadPoolType;
- try {
- threadPoolType = ThreadPool.ThreadPoolType.fromType(value);
- } catch (IllegalArgumentException e) {
- return e.getMessage();
- }
- if (defaultThreadPoolType.equals(threadPoolType)) {
- return null;
- } else {
- return String.format(
- Locale.ROOT,
- "thread pool type for [%s] can only be updated to [%s] but was [%s]",
- threadPool,
- defaultThreadPoolType.getType(),
- threadPoolType.getType()
- );
- }
- }
-
- }
- }
-
}
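
The listener plumbing above swaps NodeSettingsService.Listener for a consumer registered against a group setting, guarded so it can only be wired once. A sketch of that registration pattern using the calls shown in the hunk; the setting prefix is hypothetical, and onUpdate/validate are placeholders for the real per-pool logic:

// Sketch of the group-setting consumer registration used by ThreadPool above.
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

class GroupSettingConsumerSketch {
    static final Setting<Settings> GROUP =
            Setting.groupSetting("sketch.pool.", true, Setting.Scope.CLUSTER); // hypothetical prefix

    private final AtomicBoolean registered = new AtomicBoolean(false);

    void register(ClusterSettings clusterSettings) {
        // compareAndSet makes double registration fail loudly, as above.
        if (registered.compareAndSet(false, true)) {
            clusterSettings.addSettingsUpdateConsumer(GROUP, this::onUpdate, s -> validate(s.getAsGroups()));
        } else {
            throw new IllegalStateException("the settings listener was set more than once");
        }
    }

    private void onUpdate(Settings settings) {
        Map<String, Settings> groups = settings.getAsGroups();
        // apply per-group updates here
    }

    private void validate(Map<String, Settings> groups) {
        // reject illegal configuration before the update is committed
    }
}
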
diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java
index 10fa9b239d..78b07e3aae 100644
--- a/core/src/main/java/org/elasticsearch/transport/Transport.java
+++ b/core/src/main/java/org/elasticsearch/transport/Transport.java
@@ -21,6 +21,8 @@ package org.elasticsearch.transport;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.LifecycleComponent;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
@@ -34,9 +36,8 @@ import java.util.Map;
public interface Transport extends LifecycleComponent<Transport> {
- public static class TransportSettings {
- public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress";
- }
+ Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER);
+ Setting<Boolean> TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER);
void transportServiceAdapter(TransportServiceAdapter service);
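
These constants replace raw string keys with typed Setting instances, so callers get parsing and defaults for free. A sketch of reading them, assuming getAsGroups() keys the result by profile name ("client" here), as the NettyTransport hunk below implies:

// Sketch of typed access to the new Transport settings constants.
import java.util.Map;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.Transport;

class TransportSettingsSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("transport.tcp.compress", "true")
                .put("transport.profiles.client.port", "9700-9800")
                .build();
        boolean compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); // typed, defaulted access
        Map<String, Settings> profiles =
                Transport.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
        System.out.println(compress + " " + profiles.keySet());
    }
}
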
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
new file mode 100644
index 0000000000..8c042cd193
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+
+/**
+ * Base class for delegating transport response to a transport channel
+ */
+public abstract class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
+
+ /**
+ * Convenience method for delegating an empty response to the provided channel
+ */
+ public static TransportChannelResponseHandler<TransportResponse.Empty> emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) {
+ return new TransportChannelResponseHandler<TransportResponse.Empty>(logger, channel, extraInfoOnError) {
+ @Override
+ public TransportResponse.Empty newInstance() {
+ return TransportResponse.Empty.INSTANCE;
+ }
+ };
+ }
+
+ private final ESLogger logger;
+ private final TransportChannel channel;
+ private final String extraInfoOnError;
+
+ protected TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) {
+ this.logger = logger;
+ this.channel = channel;
+ this.extraInfoOnError = extraInfoOnError;
+ }
+
+ @Override
+ public void handleResponse(T response) {
+ try {
+ channel.sendResponse(response);
+ } catch (IOException e) {
+ handleException(new TransportException(e));
+ }
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ try {
+ channel.sendResponse(exp);
+ } catch (IOException e) {
+ logger.debug("failed to send failure {}", e, extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")");
+ }
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+}
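
A usage sketch for the new handler: relaying the outcome of a delegated action straight back onto the originating channel. The logger name and the extra-info string are illustrative only:

// Usage sketch for TransportChannelResponseHandler.emptyResponseHandler.
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportChannelResponseHandler;
import org.elasticsearch.transport.TransportResponse;

class RelaySketch {
    private static final ESLogger logger = Loggers.getLogger(RelaySketch.class);

    // On success the empty ack is written back to `channel`; on failure the
    // exception is sent instead, with the extra info kept for the debug log.
    static TransportChannelResponseHandler<TransportResponse.Empty> relay(TransportChannel channel) {
        return TransportChannelResponseHandler.emptyResponseHandler(logger, channel, "relay sketch");
    }
}
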
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportModule.java b/core/src/main/java/org/elasticsearch/transport/TransportModule.java
deleted file mode 100644
index abf90deee8..0000000000
--- a/core/src/main/java/org/elasticsearch/transport/TransportModule.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.transport;
-
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.Loggers;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.transport.local.LocalTransport;
-import org.elasticsearch.transport.netty.NettyTransport;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-/**
- *
- */
-public class TransportModule extends AbstractModule {
-
- public static final String TRANSPORT_TYPE_KEY = "transport.type";
- public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
-
- public static final String LOCAL_TRANSPORT = "local";
- public static final String NETTY_TRANSPORT = "netty";
-
- private final ESLogger logger;
- private final Settings settings;
-
- private final Map<String, Class<? extends TransportService>> transportServices = new HashMap<>();
- private final Map<String, Class<? extends Transport>> transports = new HashMap<>();
- private Class<? extends TransportService> configuredTransportService;
- private Class<? extends Transport> configuredTransport;
- private String configuredTransportServiceSource;
- private String configuredTransportSource;
-
- public TransportModule(Settings settings) {
- this.settings = settings;
- this.logger = Loggers.getLogger(getClass(), settings);
- addTransport(LOCAL_TRANSPORT, LocalTransport.class);
- addTransport(NETTY_TRANSPORT, NettyTransport.class);
- }
-
- public void addTransportService(String name, Class<? extends TransportService> clazz) {
- Class<? extends TransportService> oldClazz = transportServices.put(name, clazz);
- if (oldClazz != null) {
- throw new IllegalArgumentException("Cannot register TransportService [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName());
- }
- }
-
- public void addTransport(String name, Class<? extends Transport> clazz) {
- Class<? extends Transport> oldClazz = transports.put(name, clazz);
- if (oldClazz != null) {
- throw new IllegalArgumentException("Cannot register Transport [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName());
- }
- }
-
- @Override
- protected void configure() {
- if (configuredTransportService != null) {
- logger.info("Using [{}] as transport service, overridden by [{}]", configuredTransportService.getName(), configuredTransportServiceSource);
- bind(TransportService.class).to(configuredTransportService).asEagerSingleton();
- } else {
- String typeName = settings.get(TRANSPORT_SERVICE_TYPE_KEY);
- if (typeName == null) {
- bind(TransportService.class).asEagerSingleton();
- } else {
- if (transportServices.containsKey(typeName) == false) {
- throw new IllegalArgumentException("Unknown TransportService type [" + typeName + "], known types are: " + transportServices.keySet());
- }
- bind(TransportService.class).to(transportServices.get(typeName)).asEagerSingleton();
- }
- }
-
- bind(NamedWriteableRegistry.class).asEagerSingleton();
- if (configuredTransport != null) {
- logger.info("Using [{}] as transport, overridden by [{}]", configuredTransport.getName(), configuredTransportSource);
- bind(Transport.class).to(configuredTransport).asEagerSingleton();
- } else {
- String defaultType = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
- String typeName = settings.get(TRANSPORT_TYPE_KEY, defaultType);
- Class<? extends Transport> clazz = transports.get(typeName);
- if (clazz == null) {
- throw new IllegalArgumentException("Unknown Transport [" + typeName + "]");
- }
- bind(Transport.class).to(clazz).asEagerSingleton();
- }
- }
-
- public void setTransportService(Class<? extends TransportService> transportService, String source) {
- Objects.requireNonNull(transportService, "Configured transport service may not be null");
- Objects.requireNonNull(source, "Plugin, that changes transport service may not be null");
- this.configuredTransportService = transportService;
- this.configuredTransportServiceSource = source;
- }
-
- public void setTransport(Class<? extends Transport> transport, String source) {
- Objects.requireNonNull(transport, "Configured transport may not be null");
- Objects.requireNonNull(source, "Plugin, that changes transport may not be null");
- this.configuredTransport = transport;
- this.configuredTransportSource = source;
- }
-} \ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java
index 14fc9029b0..444f52b9c0 100644
--- a/core/src/main/java/org/elasticsearch/transport/TransportService.java
+++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java
@@ -29,6 +29,8 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
@@ -37,20 +39,15 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
+import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
import java.util.function.Supplier;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@@ -88,14 +85,14 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
// tracer log
- public static final String SETTING_TRACE_LOG_INCLUDE = "transport.tracer.include";
- public static final String SETTING_TRACE_LOG_EXCLUDE = "transport.tracer.exclude";
+ public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER);
+ public static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER);
+
private final ESLogger tracerLog;
volatile String[] tracerLogInclude;
volatile String[] tracelLogExclude;
- private final ApplySettings settingsListener = new ApplySettings();
/** if set, requests sent to this id will be shortcut and executed locally */
volatile DiscoveryNode localNode = null;
@@ -109,8 +106,8 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
super(settings);
this.transport = transport;
this.threadPool = threadPool;
- this.tracerLogInclude = settings.getAsArray(SETTING_TRACE_LOG_INCLUDE, Strings.EMPTY_ARRAY, true);
- this.tracelLogExclude = settings.getAsArray(SETTING_TRACE_LOG_EXCLUDE, new String[]{"internal:discovery/zen/fd*", TransportLivenessAction.NAME}, true);
+ setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings));
+ setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings));
tracerLog = Loggers.getLogger(logger, ".tracer");
adapter = createAdapter();
}
@@ -134,34 +131,18 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
// These need to be optional as they don't exist in the context of a transport client
@Inject(optional = true)
- public void setDynamicSettings(NodeSettingsService nodeSettingsService) {
- nodeSettingsService.addListener(settingsListener);
+ public void setDynamicSettings(ClusterSettings clusterSettings) {
+ clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude);
+ clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude);
}
-
- class ApplySettings implements NodeSettingsService.Listener {
- @Override
- public void onRefreshSettings(Settings settings) {
- String[] newTracerLogInclude = settings.getAsArray(SETTING_TRACE_LOG_INCLUDE, TransportService.this.tracerLogInclude, true);
- String[] newTracerLogExclude = settings.getAsArray(SETTING_TRACE_LOG_EXCLUDE, TransportService.this.tracelLogExclude, true);
- if (newTracerLogInclude == TransportService.this.tracerLogInclude && newTracerLogExclude == TransportService.this.tracelLogExclude) {
- return;
- }
- if (Arrays.equals(newTracerLogInclude, TransportService.this.tracerLogInclude) &&
- Arrays.equals(newTracerLogExclude, TransportService.this.tracelLogExclude)) {
- return;
- }
- TransportService.this.tracerLogInclude = newTracerLogInclude;
- TransportService.this.tracelLogExclude = newTracerLogExclude;
- logger.info("tracer log updated to use include: {}, exclude: {}", newTracerLogInclude, newTracerLogExclude);
- }
+ void setTracerLogInclude(List<String> tracerLogInclude) {
+ this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY);
}
- // used for testing
- public void applySettings(Settings settings) {
- settingsListener.onRefreshSettings(settings);
+ void setTracerLogExclude(List<String> tracelLogExclude) {
+ this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY);
}
-
@Override
protected void doStart() {
adapter.rxMetric.clear();
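
The tracer include/exclude settings follow the same pattern with Setting#listSetting plus a registered consumer per setting. A sketch of one dynamic list setting wired to a volatile field, mirroring the registration above; the setting key is hypothetical:

// Sketch of a dynamic list setting kept in sync via a settings consumer.
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

class DynamicListSettingSketch {
    static final Setting<List<String>> INCLUDE = Setting.listSetting(
            "sketch.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER);

    private volatile String[] include = new String[0];

    void register(ClusterSettings clusterSettings) {
        // The consumer receives the parsed list whenever the setting changes.
        clusterSettings.addSettingsUpdateConsumer(INCLUDE, v -> include = v.toArray(new String[0]));
    }
}
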
diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
index ab39a35d22..6a6a6c3801 100644
--- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
@@ -87,6 +87,7 @@ import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
import org.jboss.netty.util.HashedWheelTimer;
import java.io.IOException;
+import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
@@ -224,7 +225,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT)));
this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1);
- this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false);
+ this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);
this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery", settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2));
this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk", settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3));
@@ -294,7 +295,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
this.serverOpenChannels = openChannels;
// extract default profile first and create standard bootstrap
- Map<String, Settings> profiles = settings.getGroups("transport.profiles", true);
+ Map<String, Settings> profiles = TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true);
if (!profiles.containsKey(DEFAULT_PROFILE)) {
profiles = new HashMap<>(profiles);
profiles.put(DEFAULT_PROFILE, Settings.EMPTY);
@@ -763,6 +764,11 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
// close the channel as safe measure, which will cause a node to be disconnected if relevant
ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
+ } else if (e.getCause() instanceof BindException) {
+ logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel());
+ // close the channel as safe measure, which will cause a node to be disconnected if relevant
+ ctx.getChannel().close();
+ disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
} else if (e.getCause() instanceof CancelledKeyException) {
logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel());
// close the channel as safe measure, which will cause a node to be disconnected if relevant
diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java
index f577415ee6..78453c9eac 100644
--- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java
+++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java
@@ -26,7 +26,8 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.cluster.ClusterStateTaskConfig;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -36,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -205,142 +207,180 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
}
}
- class TribeClusterStateListener implements ClusterStateListener {
+ class TribeClusterStateListener implements ClusterStateListener {
private final String tribeName;
+ private final TribeNodeClusterStateTaskExecutor executor;
TribeClusterStateListener(Node tribeNode) {
- this.tribeName = tribeNode.settings().get(TRIBE_NAME);
+ String tribeName = tribeNode.settings().get(TRIBE_NAME);
+ this.tribeName = tribeName;
+ executor = new TribeNodeClusterStateTaskExecutor(tribeName);
}
@Override
public void clusterChanged(final ClusterChangedEvent event) {
logger.debug("[{}] received cluster event, [{}]", tribeName, event.source());
- clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() {
- @Override
- public boolean runOnlyOnMaster() {
- return false;
- }
+ clusterService.submitStateUpdateTask(
+ "cluster event from " + tribeName + ", " + event.source(),
+ event,
+ ClusterStateTaskConfig.build(Priority.NORMAL),
+ executor,
+ (source, t) -> logger.warn("failed to process [{}]", t, source));
+ }
+ }
- @Override
- public ClusterState execute(ClusterState currentState) throws Exception {
- ClusterState tribeState = event.state();
- DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
- // -- merge nodes
- // go over existing nodes, and see if they need to be removed
- for (DiscoveryNode discoNode : currentState.nodes()) {
- String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
- if (markedTribeName != null && markedTribeName.equals(tribeName)) {
- if (tribeState.nodes().get(discoNode.id()) == null) {
- logger.info("[{}] removing node [{}]", tribeName, discoNode);
- nodes.remove(discoNode.id());
- }
- }
+ class TribeNodeClusterStateTaskExecutor implements ClusterStateTaskExecutor<ClusterChangedEvent> {
+ private final String tribeName;
+
+ TribeNodeClusterStateTaskExecutor(String tribeName) {
+ this.tribeName = tribeName;
+ }
+
+ @Override
+ public boolean runOnlyOnMaster() {
+ return false;
+ }
+
+ @Override
+ public BatchResult<ClusterChangedEvent> execute(ClusterState currentState, List<ClusterChangedEvent> tasks) throws Exception {
+ ClusterState accumulator = ClusterState.builder(currentState).build();
+ BatchResult.Builder<ClusterChangedEvent> builder = BatchResult.builder();
+
+ try {
+ // we only need to apply the latest cluster state update
+ accumulator = applyUpdate(accumulator, tasks.get(tasks.size() - 1));
+ builder.successes(tasks);
+ } catch (Throwable t) {
+ builder.failures(tasks, t);
+ }
+
+ return builder.build(accumulator);
+ }
+
+ private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) {
+ boolean clusterStateChanged = false;
+ ClusterState tribeState = task.state();
+ DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes());
+ // -- merge nodes
+ // go over existing nodes, and see if they need to be removed
+ for (DiscoveryNode discoNode : currentState.nodes()) {
+ String markedTribeName = discoNode.attributes().get(TRIBE_NAME);
+ if (markedTribeName != null && markedTribeName.equals(tribeName)) {
+ if (tribeState.nodes().get(discoNode.id()) == null) {
+ clusterStateChanged = true;
+ logger.info("[{}] removing node [{}]", tribeName, discoNode);
+ nodes.remove(discoNode.id());
}
- // go over tribe nodes, and see if they need to be added
- for (DiscoveryNode tribe : tribeState.nodes()) {
- if (currentState.nodes().get(tribe.id()) == null) {
- // a new node, add it, but also add the tribe name to the attributes
- Map<String, String> tribeAttr = new HashMap<>();
- for (ObjectObjectCursor<String, String> attr : tribe.attributes()) {
- tribeAttr.put(attr.key, attr.value);
- }
- tribeAttr.put(TRIBE_NAME, tribeName);
- DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
- logger.info("[{}] adding node [{}]", tribeName, discoNode);
- nodes.put(discoNode);
- }
+ }
+ }
+ // go over tribe nodes, and see if they need to be added
+ for (DiscoveryNode tribe : tribeState.nodes()) {
+ if (currentState.nodes().get(tribe.id()) == null) {
+ // a new node, add it, but also add the tribe name to the attributes
+ Map<String, String> tribeAttr = new HashMap<>();
+ for (ObjectObjectCursor<String, String> attr : tribe.attributes()) {
+ tribeAttr.put(attr.key, attr.value);
}
+ tribeAttr.put(TRIBE_NAME, tribeName);
+ DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
+ clusterStateChanged = true;
+ logger.info("[{}] adding node [{}]", tribeName, discoNode);
+ nodes.put(discoNode);
+ }
+ }
- // -- merge metadata
- ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
- MetaData.Builder metaData = MetaData.builder(currentState.metaData());
- RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
- // go over existing indices, and see if they need to be removed
- for (IndexMetaData index : currentState.metaData()) {
- String markedTribeName = index.getSettings().get(TRIBE_NAME);
- if (markedTribeName != null && markedTribeName.equals(tribeName)) {
- IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex());
- if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) {
- logger.info("[{}] removing index [{}]", tribeName, index.getIndex());
- removeIndex(blocks, metaData, routingTable, index);
- } else {
- // always make sure to update the metadata and routing table, in case
- // there are changes in them (new mapping, shards moving from initializing to started)
- routingTable.add(tribeState.routingTable().index(index.getIndex()));
- Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
- metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
- }
- }
+ // -- merge metadata
+ ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+ MetaData.Builder metaData = MetaData.builder(currentState.metaData());
+ RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
+ // go over existing indices, and see if they need to be removed
+ for (IndexMetaData index : currentState.metaData()) {
+ String markedTribeName = index.getSettings().get(TRIBE_NAME);
+ if (markedTribeName != null && markedTribeName.equals(tribeName)) {
+ IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex());
+ clusterStateChanged = true;
+ if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) {
+ logger.info("[{}] removing index [{}]", tribeName, index.getIndex());
+ removeIndex(blocks, metaData, routingTable, index);
+ } else {
+ // always make sure to update the metadata and routing table, in case
+ // there are changes in them (new mapping, shards moving from initializing to started)
+ routingTable.add(tribeState.routingTable().index(index.getIndex()));
+ Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
+ metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
}
- // go over tribe one, and see if they need to be added
- for (IndexMetaData tribeIndex : tribeState.metaData()) {
- // if there is no routing table yet, do nothing with it...
- IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex());
- if (table == null) {
- continue;
- }
- final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
- if (indexMetaData == null) {
- if (!droppedIndices.contains(tribeIndex.getIndex())) {
- // a new index, add it, and add the tribe name as a setting
- logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
+ }
+ }
+ // go over tribe indices, and see if they need to be added
+ for (IndexMetaData tribeIndex : tribeState.metaData()) {
+ // if there is no routing table yet, do nothing with it...
+ IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex());
+ if (table == null) {
+ continue;
+ }
+ final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
+ if (indexMetaData == null) {
+ if (!droppedIndices.contains(tribeIndex.getIndex())) {
+ // a new index, add it, and add the tribe name as a setting
+ clusterStateChanged = true;
+ logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
+ addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
+ }
+ } else {
+ String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME);
+ if (!tribeName.equals(existingFromTribe)) {
+ // we have a potential conflict on index names, decide what to do...
+ if (ON_CONFLICT_ANY.equals(onConflict)) {
+ // we chose any tribe, carry on
+ } else if (ON_CONFLICT_DROP.equals(onConflict)) {
+ // drop the indices, there is a conflict
+ clusterStateChanged = true;
+ logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
+ removeIndex(blocks, metaData, routingTable, tribeIndex);
+ droppedIndices.add(tribeIndex.getIndex());
+ } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
+ // on conflict, prefer a tribe...
+ String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
+ if (tribeName.equals(preferredTribeName)) {
+ // the new one is the preferred one, replace...
+ clusterStateChanged = true;
+ logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
+ removeIndex(blocks, metaData, routingTable, tribeIndex);
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
- }
- } else {
- String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME);
- if (!tribeName.equals(existingFromTribe)) {
- // we have a potential conflict on index names, decide what to do...
- if (ON_CONFLICT_ANY.equals(onConflict)) {
- // we chose any tribe, carry on
- } else if (ON_CONFLICT_DROP.equals(onConflict)) {
- // drop the indices, there is a conflict
- logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
- removeIndex(blocks, metaData, routingTable, tribeIndex);
- droppedIndices.add(tribeIndex.getIndex());
- } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
- // on conflict, prefer a tribe...
- String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
- if (tribeName.equals(preferredTribeName)) {
- // the new one is hte preferred one, replace...
- logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
- removeIndex(blocks, metaData, routingTable, tribeIndex);
- addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
- } // else: either the existing one is the preferred one, or we haven't seen one, carry on
- }
- }
+ } // else: either the existing one is the preferred one, or we haven't seen one, carry on
}
}
-
- return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build();
}
+ }
- private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) {
- metaData.remove(index.getIndex());
- routingTable.remove(index.getIndex());
- blocks.removeIndexBlocks(index.getIndex());
- }
+ if (!clusterStateChanged) {
+ return currentState;
+ } else {
+ return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build();
+ }
+ }
- private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
- Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
- metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
- routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));
- if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) {
- blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
- }
- if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) {
- blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
- }
- if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) {
- blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
- }
- }
+ private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) {
+ metaData.remove(index.getIndex());
+ routingTable.remove(index.getIndex());
+ blocks.removeIndexBlocks(index.getIndex());
+ }
- @Override
- public void onFailure(String source, Throwable t) {
- logger.warn("failed to process [{}]", t, source);
- }
- });
+ private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
+ Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
+ metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
+ routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));
+ if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) {
+ blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
+ }
+ if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) {
+ blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
+ }
+ if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) {
+ blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
+ }
}
}
}
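
The rewritten listener pushes tribe events through a batching ClusterStateTaskExecutor, and because every event carries the full remote cluster state, only the newest task in a batch needs applying. A minimal sketch of that collapse-to-latest executor; applyLatest is a placeholder for the merge logic (applyUpdate above):

// Sketch of a batched executor that applies only the latest queued event.
import java.util.List;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

class LatestOnlyExecutorSketch implements ClusterStateTaskExecutor<ClusterChangedEvent> {
    @Override
    public boolean runOnlyOnMaster() {
        return false; // tribe state is applied on the local node, not the master
    }

    @Override
    public BatchResult<ClusterChangedEvent> execute(ClusterState currentState, List<ClusterChangedEvent> tasks) throws Exception {
        BatchResult.Builder<ClusterChangedEvent> builder = BatchResult.builder();
        ClusterState result = currentState;
        try {
            // Each event carries the full remote state, so only the last matters.
            result = applyLatest(currentState, tasks.get(tasks.size() - 1));
            builder.successes(tasks);
        } catch (Throwable t) {
            builder.failures(tasks, t);
        }
        return builder.build(result);
    }

    // Placeholder for the real merge logic (applyUpdate in the diff above).
    private ClusterState applyLatest(ClusterState currentState, ClusterChangedEvent latest) {
        return currentState;
    }
}
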