diff options
author | Ryan Ernst <ryan@iernst.net> | 2015-12-18 12:24:30 -0800 |
---|---|---|
committer | Ryan Ernst <ryan@iernst.net> | 2015-12-18 12:24:30 -0800 |
commit | 853e9c0fd10af51e66f5b9d63c5e6b248968c15e (patch) | |
tree | b39f0a1723d582e857181d0cf69c3be23411e544 /core | |
parent | 0f518e1b07059293b7ab94c5494ff86f531053a4 (diff) | |
parent | 10dfa32f9ddc2216be2aabb88462a32ce5900333 (diff) |
Merge branch 'master' into wildcard_imports
Diffstat (limited to 'core')
438 files changed, 13154 insertions, 6429 deletions
diff --git a/core/build.gradle b/core/build.gradle index fd8a0c10f5..61cdd12a19 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -82,7 +82,7 @@ dependencies { compile "net.java.dev.jna:jna:${versions.jna}", optional if (isEclipse == false || project.path == ":core-tests") { - testCompile("org.elasticsearch:test-framework:${version}") { + testCompile("org.elasticsearch.test:framework:${version}") { // tests use the locally compiled version of core exclude group: 'org.elasticsearch', module: 'elasticsearch' } @@ -111,6 +111,14 @@ forbiddenPatterns { exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt' } +// classes are missing, e.g. org.jboss.marshalling.Marshaller +thirdPartyAudit.missingClasses = true +// uses internal sun ssl classes! +thirdPartyAudit.excludes = [ + // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name) + 'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', +] + // dependency license are currently checked in distribution dependencyLicenses.enabled = false diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index fce58d2f88..9f2b1b6622 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser { static { Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>(); fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension()); - fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension()); FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions); } diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index a5e2e38ca2..b8ba0a411a 100644 --- 
a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -268,11 +268,15 @@ public class Version { public static final int V_2_0_1_ID = 2000199; public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_0_2_ID = 2000299; - public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final int V_2_0_3_ID = 2000399; + public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_1_0_ID = 2010099; public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1); public static final int V_2_1_1_ID = 2010199; - public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1); + public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1); + public static final int V_2_1_2_ID = 2010299; + public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1); public static final int V_2_2_0_ID = 2020099; public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0); public static final int V_3_0_0_ID = 3000099; @@ -293,10 +297,14 @@ public class Version { return V_3_0_0; case V_2_2_0_ID: return V_2_2_0; + case V_2_1_2_ID: + return V_2_1_2; case V_2_1_1_ID: return V_2_1_1; case V_2_1_0_ID: return V_2_1_0; + case V_2_0_3_ID: + return V_2_0_3; case V_2_0_2_ID: return V_2_0_2; case V_2_0_1_ID: diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 
88ccb80971..adcb873e83 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; @@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule { registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class); registerAction(FlushAction.INSTANCE, TransportFlushAction.class); + registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class); registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java new file mode 100644 index 0000000000..f5020a46b3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.ClusterState.builder; + +/** + * Updates transient and persistent cluster state settings if there are any changes + * due to the update. 
+ */ +final class SettingsUpdater { + final Settings.Builder transientUpdates = Settings.settingsBuilder(); + final Settings.Builder persistentUpdates = Settings.settingsBuilder(); + private final ClusterSettings clusterSettings; + + SettingsUpdater(ClusterSettings clusterSettings) { + this.clusterSettings = clusterSettings; + } + + synchronized Settings getTransientUpdates() { + return transientUpdates.build(); + } + + synchronized Settings getPersistentUpdate() { + return persistentUpdates.build(); + } + + synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) { + boolean changed = false; + Settings.Builder transientSettings = Settings.settingsBuilder(); + transientSettings.put(currentState.metaData().transientSettings()); + changed |= apply(transientToApply, transientSettings, transientUpdates, "transient"); + + Settings.Builder persistentSettings = Settings.settingsBuilder(); + persistentSettings.put(currentState.metaData().persistentSettings()); + changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent"); + + if (!changed) { + return currentState; + } + + MetaData.Builder metaData = MetaData.builder(currentState.metaData()) + .persistentSettings(persistentSettings.build()) + .transientSettings(transientSettings.build()); + + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings()); + if (updatedReadOnly) { + blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); + } else { + blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); + } + ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build(); + Settings settings = build.metaData().settings(); + // now we try to apply things and if they are invalid we fail + // this dryRun will 
validate & parse settings but won't actually apply them. + clusterSettings.dryRun(settings); + return build; + } + + private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) { + boolean changed = false; + final Set<String> toRemove = new HashSet<>(); + Settings.Builder settingsBuilder = Settings.settingsBuilder(); + for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) { + if (entry.getValue() == null) { + toRemove.add(entry.getKey()); + } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) { + settingsBuilder.put(entry.getKey(), entry.getValue()); + updates.put(entry.getKey(), entry.getValue()); + changed = true; + } else { + throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable"); + } + + } + changed |= applyDeletes(toRemove, target); + target.put(settingsBuilder.build()); + return changed; + } + + private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) { + boolean changed = false; + for (String entry : deletes) { + Set<String> keysToRemove = new HashSet<>(); + Set<String> keySet = builder.internalMap().keySet(); + for (String key : keySet) { + if (Regex.simpleMatch(entry, key)) { + keysToRemove.add(key); + } + } + for (String key : keysToRemove) { + builder.remove(key); + changed = true; + } + } + return changed; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 73d14a2bb1..99815b77ff 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -28,23 +28,19 @@ import 
org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; -import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.Map; - import static org.elasticsearch.cluster.ClusterState.builder; /** @@ -54,15 +50,14 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct private final AllocationService allocationService; - private final DynamicSettings dynamicSettings; + private final ClusterSettings clusterSettings; @Inject public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, 
actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; - this.dynamicSettings = dynamicSettings; + this.clusterSettings = clusterSettings; } @Override @@ -73,8 +68,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) { // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it - if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && request.persistentSettings().get(MetaData.SETTING_READ_ONLY) != null) || - request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) { + if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) || + request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) { return null; } return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); @@ -88,9 +83,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) { - final Settings.Builder transientUpdates = Settings.settingsBuilder(); - final Settings.Builder persistentUpdates = Settings.settingsBuilder(); - + final SettingsUpdater updater = new SettingsUpdater(clusterSettings); clusterService.submitStateUpdateTask("cluster_update_settings", new 
AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) { @@ -98,7 +91,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(acknowledged, transientUpdates.build(), persistentUpdates.build()); + return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); } @Override @@ -125,7 +118,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct // so we should *not* execute the reroute. if (!clusterService.state().nodes().localNodeMaster()) { logger.debug("Skipping reroute after cluster update settings, because node is no longer master"); - listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build())); + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate())); return; } @@ -145,13 +138,13 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override //we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the update settings was acknowledged protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, transientUpdates.build(), persistentUpdates.build()); + return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); } @Override public void onNoLongerMaster(String source) { logger.debug("failed to preform reroute after cluster settings were updated - current node is no longer a master"); - listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), 
persistentUpdates.build())); + listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate())); } @Override @@ -181,58 +174,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override public ClusterState execute(final ClusterState currentState) { - Settings.Builder transientSettings = Settings.settingsBuilder(); - transientSettings.put(currentState.metaData().transientSettings()); - for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - transientSettings.put(entry.getKey(), entry.getValue()); - transientUpdates.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error); - } - } else { - logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey()); - } - } - - Settings.Builder persistentSettings = Settings.settingsBuilder(); - persistentSettings.put(currentState.metaData().persistentSettings()); - for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - persistentSettings.put(entry.getKey(), entry.getValue()); - persistentUpdates.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error); - } - } else { - logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey()); - } - } - - if (!changed) { - return currentState; - } - - MetaData.Builder 
metaData = MetaData.builder(currentState.metaData()) - .persistentSettings(persistentSettings.build()) - .transientSettings(transientSettings.build()); - - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false); - if (updatedReadOnly) { - blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); - } else { - blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); - } - - return builder(currentState).metaData(metaData).blocks(blocks).build(); + ClusterState clusterState = updater.updateSettings(currentState, request.transientSettings(), request.persistentSettings()); + changed = clusterState != currentState; + return clusterState; } }); } + } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java index 5fe8297a6b..f2bfb18c43 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java @@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; + public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> { private final ScriptService scriptService; @@ -55,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction< @Override protected void doRun() throws Exception { - ExecutableScript executable = 
scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request); + ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap()); BytesReference processedTemplate = (BytesReference) executable.run(); RenderSearchTemplateResponse response = new RenderSearchTemplateResponse(); response.source(processedTemplate); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 2c25ee34f1..e454fcabc7 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -31,31 +31,36 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** * Close index action */ -public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> implements NodeSettingsService.Listener { +public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> { private final MetaDataIndexStateService indexStateService; private final DestructiveOperations destructiveOperations; private volatile boolean closeIndexEnabled; - public static final String SETTING_CLUSTER_INDICES_CLOSE_ENABLE = 
"cluster.indices.close.enable"; + public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER); @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexStateService indexStateService, - NodeSettingsService nodeSettingsService, ActionFilters actionFilters, + ClusterSettings clusterSettings, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; - this.closeIndexEnabled = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, true); - nodeSettingsService.addListener(this); + this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_INDICES_CLOSE_ENABLE_SETTING, this::setCloseIndexEnabled); + } + + private void setCloseIndexEnabled(boolean closeIndexEnabled) { + this.closeIndexEnabled = closeIndexEnabled; } @Override @@ -73,7 +78,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) { destructiveOperations.failDestructive(request.indices()); if (closeIndexEnabled == false) { - throw new IllegalStateException("closing indices is disabled - set [" + SETTING_CLUSTER_INDICES_CLOSE_ENABLE + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); + throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. 
NOTE: closed indices still consume a significant amount of diskspace"); } super.doExecute(request, listener); } @@ -104,13 +109,4 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn } }); } - - @Override - public void onRefreshSettings(Settings settings) { - final boolean enable = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled); - if (enable != this.closeIndexEnabled) { - logger.info("updating [{}] from [{}] to [{}]", SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled, enable); - this.closeIndexEnabled = enable; - } - } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index c02e2ade2a..82176da053 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -31,7 +31,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -45,8 +44,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete @Inject public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, - NodeSettingsService nodeSettingsService, ActionFilters actionFilters, + ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, ActionFilters actionFilters, IndexNameExpressionResolver 
indexNameExpressionResolver, DestructiveOperations destructiveOperations) { super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexRequest::new); this.deleteIndexService = deleteIndexService; diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java index f9fc8c9d5d..291fd49c63 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MissingFieldQueryExtension.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java @@ -17,26 +17,28 @@ * under the License. */ -package org.apache.lucene.queryparser.classic; +package org.elasticsearch.action.admin.indices.flush; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.index.query.MissingQueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; -/** - * - */ -public class MissingFieldQueryExtension implements FieldQueryExtension { - public static final String NAME = "_missing_"; +public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> { + + public static final SyncedFlushAction INSTANCE = new SyncedFlushAction(); + public static final String NAME = "indices:admin/synced_flush"; + + private SyncedFlushAction() { + super(NAME); + } + + @Override + public SyncedFlushResponse newResponse() { + return new SyncedFlushResponse(); + } @Override - public Query query(QueryShardContext context, String queryText) { - Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE); - if (query != null) { - return new ConstantScoreQuery(query); - } - return 
null; + public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new SyncedFlushRequestBuilder(client, this); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java new file mode 100644 index 0000000000..59719fe887 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; + +import java.util.Arrays; + +/** + * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush + * and writes the same sync id to primary and all copies. + * + * <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p> + * + * @see org.elasticsearch.client.Requests#flushRequest(String...) 
+ * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) + * @see SyncedFlushResponse + */ +public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> { + + public SyncedFlushRequest() { + } + + /** + * Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument. + * The new request will inherit though headers and context from the original request that caused it. + */ + public SyncedFlushRequest(ActionRequest originalRequest) { + super(originalRequest); + } + + /** + * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will + * be sync flushed. + */ + public SyncedFlushRequest(String... indices) { + super(indices); + } + + + @Override + public String toString() { + return "SyncedFlushRequest{" + + "indices=" + Arrays.toString(indices) + "}"; + } +} diff --git a/core/src/main/java/org/elasticsearch/rest/RestModule.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java index e7949172d0..9e40726081 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestModule.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java @@ -17,35 +17,25 @@ * under the License. 
*/ -package org.elasticsearch.rest; +package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.action.RestActionModule; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.ElasticsearchClient; -import java.util.ArrayList; -import java.util.List; +public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> { -/** - * - */ -public class RestModule extends AbstractModule { - - private final Settings settings; - private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>(); - - public void addRestAction(Class<? extends BaseRestHandler> restAction) { - restPluginsActions.add(restAction); + public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) { + super(client, action, new SyncedFlushRequest()); } - public RestModule(Settings settings) { - this.settings = settings; + public SyncedFlushRequestBuilder setIndices(String[] indices) { + super.request().indices(indices); + return this; } - - @Override - protected void configure() { - bind(RestController.class).asEagerSingleton(); - new RestActionModule(restPluginsActions).configure(binder()); + public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { + super.request().indicesOptions(indicesOptions); + return this; } } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java index 435c0d138c..5925370e5f 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java @@ -16,16 +16,25 @@ * specific 
language governing permissions and limitations * under the License. */ -package org.elasticsearch.indices.flush; +package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap; /** * The result of performing a sync flush operation on all shards of multiple indices */ -public class IndicesSyncedFlushResult implements ToXContent { +public class SyncedFlushResponse extends ActionResponse implements ToXContent { - final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex; - final ShardCounts shardCounts; + Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex; + ShardCounts shardCounts; + SyncedFlushResponse() { - public IndicesSyncedFlushResult(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) { + } + + public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) { // shardsResultPerIndex is never modified after it is passed to this // constructor so this is safe even though shardsResultPerIndex is a // ConcurrentHashMap @@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent { this.shardCounts = 
calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); } - /** total number shards, including replicas, both assigned and unassigned */ + /** + * total number shards, including replicas, both assigned and unassigned + */ public int totalShards() { return shardCounts.total; } - /** total number of shards for which the operation failed */ + /** + * total number of shards for which the operation failed + */ public int failedShards() { return shardCounts.failed; } - /** total number of shards which were successfully sync-flushed */ + /** + * total number of shards which were successfully sync-flushed + */ public int successfulShards() { return shardCounts.successful; } @@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent { builder.endObject(); continue; } - Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards = shardResults.failedShards(); - for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardEntry : failedShards.entrySet()) { + Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards(); + for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) { builder.startObject(); builder.field(Fields.SHARD, shardResults.shardId().id()); builder.field(Fields.REASON, shardEntry.getValue().failureReason()); @@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent { return new ShardCounts(total, successful, failed); } - static final class ShardCounts implements ToXContent { + static final class ShardCounts implements ToXContent, Streamable { - public final int total; - public final int successful; - public final int failed; + public int total; + public int successful; + public int failed; ShardCounts(int total, int successful, int failed) { this.total = total; @@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent { this.failed = failed; } + 
ShardCounts() { + + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.TOTAL, total); @@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent { builder.field(Fields.FAILED, failed); return builder; } + + @Override + public void readFrom(StreamInput in) throws IOException { + total = in.readInt(); + successful = in.readInt(); + failed = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(total); + out.writeInt(successful); + out.writeInt(failed); + } } static final class Fields { @@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent { static final XContentBuilderString ROUTING = new XContentBuilderString("routing"); static final XContentBuilderString REASON = new XContentBuilderString("reason"); } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shardCounts = new ShardCounts(); + shardCounts.readFrom(in); + Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>(); + int numShardsResults = in.readInt(); + for (int i =0 ; i< numShardsResults; i++) { + String index = in.readString(); + List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>(); + int numShards = in.readInt(); + for (int j =0; j< numShards; j++) { + shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in)); + } + tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); + } + shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardCounts.writeTo(out); + out.writeInt(shardsResultPerIndex.size()); + for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) { + out.writeString(entry.getKey()); + out.writeInt(entry.getValue().size()); + 
for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) { + shardsSyncedFlushResult.writeTo(out); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java new file mode 100644 index 0000000000..3ba354f462 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.flush.SyncedFlushService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * Synced flush Action. 
+ */ +public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> { + + SyncedFlushService syncedFlushService; + + @Inject + public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + SyncedFlushService syncedFlushService) { + super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new); + this.syncedFlushService = syncedFlushService; + } + + @Override + protected void doExecute(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) { + syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index cab1047cac..2717a2320e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -32,12 +32,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.Arrays; - /** * Open index action */ @@ -49,7 +46,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde @Inject public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, 
MetaDataIndexStateService indexStateService, - NodeSettingsService nodeSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new); this.indexStateService = indexStateService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 84b39d4c68..380f6e0089 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> { private DiscoveryNode node; private long version; + private String allocationId; private Throwable storeException; - private Allocation allocation; + private AllocationStatus allocationStatus; /** * The status of the shard store with respect to the cluster */ - public enum Allocation { + public enum AllocationStatus { /** * Allocated as primary @@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon private final byte id; - Allocation(byte id) { + AllocationStatus(byte id) { this.id = id; } - private static Allocation fromId(byte id) { + private static AllocationStatus fromId(byte id) { switch (id) { case 0: return PRIMARY; case 1: return REPLICA; case 2: return UNUSED; - default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + default: throw new 
IllegalArgumentException("unknown id for allocation status [" + id + "]"); } } @@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon case 0: return "primary"; case 1: return "replica"; case 2: return "unused"; - default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); } } - private static Allocation readFrom(StreamInput in) throws IOException { + private static AllocationStatus readFrom(StreamInput in) throws IOException { return fromId(in.readByte()); } @@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon private StoreStatus() { } - public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) { + public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) { this.node = node; this.version = version; - this.allocation = allocation; + this.allocationId = allocationId; + this.allocationStatus = allocationStatus; this.storeException = storeException; } @@ -130,14 +132,21 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } /** - * Version of the store, used to select the store that will be - * used as a primary. + * Version of the store */ public long getVersion() { return version; } /** + * AllocationStatus id of the store, used to select the store that will be + * used as a primary. + */ + public String getAllocationId() { + return allocationId; + } + + /** * Exception while trying to open the * shard index or from when the shard failed */ @@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } /** - * The allocation status of the store. 
- * {@link Allocation#PRIMARY} indicates a primary shard copy - * {@link Allocation#REPLICA} indicates a replica shard copy - * {@link Allocation#UNUSED} indicates an unused shard copy + * The allocationStatus status of the store. + * {@link AllocationStatus#PRIMARY} indicates a primary shard copy + * {@link AllocationStatus#REPLICA} indicates a replica shard copy + * {@link AllocationStatus#UNUSED} indicates an unused shard copy */ - public Allocation getAllocation() { - return allocation; + public AllocationStatus getAllocationStatus() { + return allocationStatus; } static StoreStatus readStoreStatus(StreamInput in) throws IOException { @@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public void readFrom(StreamInput in) throws IOException { node = DiscoveryNode.readNode(in); version = in.readLong(); - allocation = Allocation.readFrom(in); + allocationId = in.readOptionalString(); + allocationStatus = AllocationStatus.readFrom(in); if (in.readBoolean()) { storeException = in.readThrowable(); } @@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); out.writeLong(version); - allocation.writeTo(out); + out.writeOptionalString(allocationId); + allocationStatus.writeTo(out); if (storeException != null) { out.writeBoolean(true); out.writeThrowable(storeException); @@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { node.toXContent(builder, params); builder.field(Fields.VERSION, version); - builder.field(Fields.ALLOCATED, allocation.value()); + builder.field(Fields.ALLOCATION_ID, allocationId); + builder.field(Fields.ALLOCATED, allocationStatus.value()); if (storeException != null) { builder.startObject(Fields.STORE_EXCEPTION); 
ElasticsearchException.toXContent(builder, params, storeException); @@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } else { int compare = Long.compare(other.version, version); if (compare == 0) { - return Integer.compare(allocation.id, other.allocation.id); + return Integer.compare(allocationStatus.id, other.allocationStatus.id); } return compare; } @@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon static final XContentBuilderString STORES = new XContentBuilderString("stores"); // StoreStatus fields static final XContentBuilderString VERSION = new XContentBuilderString("version"); + static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id"); static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception"); static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation"); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 336ebc254b..d345c0e7d4 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc } for (NodeGatewayStartedShards response : fetchResponse.responses) { if (shardExistsInNode(response)) { - IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); - storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException())); + 
IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); + storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException())); } } CollectionUtil.timSort(storeStatuses); @@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); } - private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { + private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) { for (ShardRouting shardRouting : routingNodes.node(node.id())) { ShardId shardId = shardRouting.shardId(); if (shardId.id() == shardID && shardId.getIndex().equals(index)) { if (shardRouting.primary()) { - return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY; } else if (shardRouting.assignedToNode()) { - return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA; } else { - return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED; } } } - return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED; } /** * A shard exists/existed in a node only if shard state file exists in the node */ private boolean shardExistsInNode(final NodeGatewayStartedShards response) { - return response.storeException() != null || response.version() != -1; + return response.storeException() != 
null || response.version() != -1 || response.allocationId() != null; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java new file mode 100644 index 0000000000..a0ccca0fb5 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal + * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. + * + * Notes for implementing custom subclasses: + * + * The underlying mathematical principle of <code>BackoffPolicy</code> are progressions which can be either finite or infinite although + * the latter should not be used for retrying. 
A progression can be mapped to a <code>java.util.Iterator</code> with the following + * semantics: + * + * <ul> + * <li><code>#hasNext()</code> determines whether the progression has more elements. Return <code>true</code> for infinite progressions</li> + * <li><code>#next()</code> determines the next element in the progression, i.e. the next wait time period</li> + * </ul> + * + * Note that backoff policies are exposed as <code>Iterables</code> in order to be consumed multiple times. + */ +public abstract class BackoffPolicy implements Iterable<TimeValue> { + private static final BackoffPolicy NO_BACKOFF = new NoBackoff(); + + /** + * Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt. + * + * @return A backoff policy without any backoff period. The returned instance is thread safe. + */ + public static BackoffPolicy noBackoff() { + return NO_BACKOFF; + } + + /** + * Creates an new constant backoff policy with the provided configuration. + * + * @param delay The delay defines how long to wait between retry attempts. Must not be null. + * Must be <= <code>Integer.MAX_VALUE</code> ms. + * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number. + * @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) { + return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries); + } + + /** + * Creates an new exponential backoff policy with a default configuration of 50 ms initial wait period and 8 retries taking + * roughly 5.1 seconds in total. + * + * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. 
+ */ + public static BackoffPolicy exponentialBackoff() { + return exponentialBackoff(TimeValue.timeValueMillis(50), 8); + } + + /** + * Creates an new exponential backoff policy with the provided configuration. + * + * @param initialDelay The initial delay defines how long to wait for the first retry attempt. Must not be null. + * Must be <= <code>Integer.MAX_VALUE</code> ms. + * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number. + * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) { + return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries); + } + + private static TimeValue checkDelay(TimeValue delay) { + if (delay.millis() > Integer.MAX_VALUE) { + throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms"); + } + return delay; + } + + private static class NoBackoff extends BackoffPolicy { + @Override + public Iterator<TimeValue> iterator() { + return new Iterator<TimeValue>() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public TimeValue next() { + throw new NoSuchElementException("No backoff"); + } + }; + } + } + + private static class ExponentialBackoff extends BackoffPolicy { + private final int start; + + private final int numberOfElements; + + private ExponentialBackoff(int start, int numberOfElements) { + assert start >= 0; + assert numberOfElements >= 0; + this.start = start; + this.numberOfElements = numberOfElements; + } + + @Override + public Iterator<TimeValue> iterator() { + return new ExponentialBackoffIterator(start, numberOfElements); + } + } + + private static class ExponentialBackoffIterator implements Iterator<TimeValue> { + private final int numberOfElements; + + private 
final int start; + + private int currentlyConsumed; + + private ExponentialBackoffIterator(int start, int numberOfElements) { + this.start = start; + this.numberOfElements = numberOfElements; + } + + @Override + public boolean hasNext() { + return currentlyConsumed < numberOfElements; + } + + @Override + public TimeValue next() { + if (!hasNext()) { + throw new NoSuchElementException("Only up to " + numberOfElements + " elements"); + } + int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1); + currentlyConsumed++; + return TimeValue.timeValueMillis(result); + } + } + + private static final class ConstantBackoff extends BackoffPolicy { + private final TimeValue delay; + + private final int numberOfElements; + + public ConstantBackoff(TimeValue delay, int numberOfElements) { + assert numberOfElements >= 0; + this.delay = delay; + this.numberOfElements = numberOfElements; + } + + @Override + public Iterator<TimeValue> iterator() { + return new ConstantBackoffIterator(delay, numberOfElements); + } + } + + private static final class ConstantBackoffIterator implements Iterator<TimeValue> { + private final TimeValue delay; + private final int numberOfElements; + private int curr; + + public ConstantBackoffIterator(TimeValue delay, int numberOfElements) { + this.delay = delay; + this.numberOfElements = numberOfElements; + } + + @Override + public boolean hasNext() { + return curr < numberOfElements; + } + + @Override + public TimeValue next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + curr++; + return delay; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 2a7c185ad8..43014cfb75 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.bulk; -import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -48,7 +47,7 @@ public class BulkProcessor implements Closeable { /** * A listener for the execution. */ - public static interface Listener { + public interface Listener { /** * Callback before the bulk is executed. @@ -62,6 +61,9 @@ public class BulkProcessor implements Closeable { /** * Callback after a failed execution of bulk request. + * + * Note that in case an instance of <code>InterruptedException</code> is passed, which means that request processing has been + * cancelled externally, the thread's interruption status has been restored prior to calling this method. */ void afterBulk(long executionId, BulkRequest request, Throwable failure); } @@ -79,6 +81,7 @@ public class BulkProcessor implements Closeable { private int bulkActions = 1000; private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; + private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); /** * Creates a builder of bulk processor with the client to use and the listener that will be used @@ -137,10 +140,26 @@ public class BulkProcessor implements Closeable { } /** + * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally + * in case they have failed due to resource constraints (i.e. a thread pool was full). + * + * The default is to back off exponentially. + * + * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff() + */ + public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) { + if (backoffPolicy == null) { + throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()"); + } + this.backoffPolicy = backoffPolicy; + return this; + } + + /** * Builds a new bulk processor. 
*/ public BulkProcessor build() { - return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); + return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); } } @@ -148,42 +167,31 @@ public class BulkProcessor implements Closeable { if (client == null) { throw new NullPointerException("The client you specified while building a BulkProcessor is null"); } - + return new Builder(client, listener); } - private final Client client; - private final Listener listener; - - private final String name; - - private final int concurrentRequests; private final int bulkActions; private final long bulkSize; - private final TimeValue flushInterval; - private final Semaphore semaphore; + private final ScheduledThreadPoolExecutor scheduler; private final ScheduledFuture scheduledFuture; private final AtomicLong executionIdGen = new AtomicLong(); private BulkRequest bulkRequest; + private final BulkRequestHandler bulkRequestHandler; private volatile boolean closed = false; - BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { - this.client = client; - this.listener = listener; - this.name = name; - this.concurrentRequests = concurrentRequests; + BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { this.bulkActions = bulkActions; this.bulkSize = bulkSize.bytes(); - this.semaphore = new Semaphore(concurrentRequests); this.bulkRequest = new BulkRequest(); + this.bulkRequestHandler = (concurrentRequests == 0) ? 
BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests); - this.flushInterval = flushInterval; if (flushInterval != null) { this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor")); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); @@ -231,14 +239,7 @@ public class BulkProcessor implements Closeable { if (bulkRequest.numberOfActions() > 0) { execute(); } - if (this.concurrentRequests < 1) { - return true; - } - if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { - semaphore.release(this.concurrentRequests); - return true; - } - return false; + return this.bulkRequestHandler.awaitClose(timeout, unit); } /** @@ -308,58 +309,7 @@ public class BulkProcessor implements Closeable { final long executionId = executionIdGen.incrementAndGet(); this.bulkRequest = new BulkRequest(); - - if (concurrentRequests == 0) { - // execute in a blocking fashion... 
- boolean afterCalled = false; - try { - listener.beforeBulk(executionId, bulkRequest); - BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet(); - afterCalled = true; - listener.afterBulk(executionId, bulkRequest, bulkItemResponses); - } catch (Exception e) { - if (!afterCalled) { - listener.afterBulk(executionId, bulkRequest, e); - } - } - } else { - boolean success = false; - boolean acquired = false; - try { - listener.beforeBulk(executionId, bulkRequest); - semaphore.acquire(); - acquired = true; - client.bulk(bulkRequest, new ActionListener<BulkResponse>() { - @Override - public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - } - } - - @Override - public void onFailure(Throwable e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - } - } - }); - success = true; - } catch (InterruptedException e) { - Thread.interrupted(); - listener.afterBulk(executionId, bulkRequest, e); - } catch (Throwable t) { - listener.afterBulk(executionId, bulkRequest, t); - } finally { - if (!success && acquired) { // if we fail on client.bulk() release the semaphore - semaphore.release(); - } - } - } + this.bulkRequestHandler.execute(bulkRequest, executionId); } private boolean isOverTheLimit() { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java new file mode 100644 index 0000000000..dc98a16c57 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -0,0 +1,166 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; + +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +/** + * Abstracts the low-level details of bulk request handling + */ +abstract class BulkRequestHandler { + protected final ESLogger logger; + protected final Client client; + + protected BulkRequestHandler(Client client) { + this.client = client; + this.logger = Loggers.getLogger(getClass(), client.settings()); + } + + + public abstract void execute(BulkRequest bulkRequest, long executionId); + + public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; + + + public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) { + return new SyncBulkRequestHandler(client, backoffPolicy, listener); + } + + public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) { + return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests); + } + + private static class SyncBulkRequestHandler 
extends BulkRequestHandler { + private final BulkProcessor.Listener listener; + private final BackoffPolicy backoffPolicy; + + public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) { + super(client); + this.backoffPolicy = backoffPolicy; + this.listener = listener; + } + + @Override + public void execute(BulkRequest bulkRequest, long executionId) { + boolean afterCalled = false; + try { + listener.beforeBulk(executionId, bulkRequest); + BulkResponse bulkResponse = Retry + .on(EsRejectedExecutionException.class) + .policy(backoffPolicy) + .withSyncBackoff(client, bulkRequest); + afterCalled = true; + listener.afterBulk(executionId, bulkRequest, bulkResponse); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info("Bulk request {} has been cancelled.", e, executionId); + if (!afterCalled) { + listener.afterBulk(executionId, bulkRequest, e); + } + } catch (Throwable t) { + logger.warn("Failed to execute bulk request {}.", t, executionId); + if (!afterCalled) { + listener.afterBulk(executionId, bulkRequest, t); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + // we are "closed" immediately as there is no request in flight + return true; + } + } + + private static class AsyncBulkRequestHandler extends BulkRequestHandler { + private final BackoffPolicy backoffPolicy; + private final BulkProcessor.Listener listener; + private final Semaphore semaphore; + private final int concurrentRequests; + + private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) { + super(client); + this.backoffPolicy = backoffPolicy; + assert concurrentRequests > 0; + this.listener = listener; + this.concurrentRequests = concurrentRequests; + this.semaphore = new Semaphore(concurrentRequests); + } + + @Override + public void execute(BulkRequest bulkRequest, long 
executionId) { + boolean bulkRequestSetupSuccessful = false; + boolean acquired = false; + try { + listener.beforeBulk(executionId, bulkRequest); + semaphore.acquire(); + acquired = true; + Retry.on(EsRejectedExecutionException.class) + .policy(backoffPolicy) + .withAsyncBackoff(client, bulkRequest, new ActionListener<BulkResponse>() { + @Override + public void onResponse(BulkResponse response) { + try { + listener.afterBulk(executionId, bulkRequest, response); + } finally { + semaphore.release(); + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.afterBulk(executionId, bulkRequest, e); + } finally { + semaphore.release(); + } + } + }); + bulkRequestSetupSuccessful = true; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info("Bulk request {} has been cancelled.", e, executionId); + listener.afterBulk(executionId, bulkRequest, e); + } catch (Throwable t) { + logger.warn("Failed to execute bulk request {}.", t, executionId); + listener.afterBulk(executionId, bulkRequest, t); + } finally { + if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore + semaphore.release(); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { + semaphore.release(this.concurrentRequests); + return true; + } + return false; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java new file mode 100644 index 0000000000..477e61045b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.*; +import java.util.function.Predicate; + +/** + * Encapsulates synchronous and asynchronous retry logic. + */ +class Retry { + private final Class<? extends Throwable> retryOnThrowable; + + private BackoffPolicy backoffPolicy; + + public static Retry on(Class<? extends Throwable> retryOnThrowable) { + return new Retry(retryOnThrowable); + } + + /** + * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries. + */ + public Retry policy(BackoffPolicy backoffPolicy) { + this.backoffPolicy = backoffPolicy; + return this; + } + + Retry(Class<? 
extends Throwable> retryOnThrowable) { + this.retryOnThrowable = retryOnThrowable; + } + + /** + * Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the + * provided listener. + * + * @param client Client invoking the bulk request. + * @param bulkRequest The bulk request that should be executed. + * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not + */ + public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) { + AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener); + r.execute(bulkRequest); + + } + + /** + * Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception. + * + * @param client Client invoking the bulk request. + * @param bulkRequest The bulk request that should be executed. + * @return the bulk response as returned by the client. + * @throws Exception Any exception thrown by the callable. + */ + public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception { + return SyncRetryHandler + .create(retryOnThrowable, backoffPolicy, client) + .executeBlocking(bulkRequest) + .actionGet(); + } + + static class AbstractRetryHandler implements ActionListener<BulkResponse> { + private final ESLogger logger; + private final Client client; + private final ActionListener<BulkResponse> listener; + private final Iterator<TimeValue> backoff; + private final Class<? 
extends Throwable> retryOnThrowable; + // Access only when holding a client-side lock, see also #addResponses() + private final List<BulkItemResponse> responses = new ArrayList<>(); + private final long startTimestampNanos; + // needed to construct the next bulk request based on the response to the previous one + // volatile as we're called from a scheduled thread + private volatile BulkRequest currentBulkRequest; + private volatile ScheduledFuture<?> scheduledRequestFuture; + + public AbstractRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) { + this.retryOnThrowable = retryOnThrowable; + this.backoff = backoffPolicy.iterator(); + this.client = client; + this.listener = listener; + this.logger = Loggers.getLogger(getClass(), client.settings()); + // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood + this.startTimestampNanos = System.nanoTime(); + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + if (!bulkItemResponses.hasFailures()) { + // we're done here, include all responses + addResponses(bulkItemResponses, (r -> true)); + finishHim(); + } else { + if (canRetry(bulkItemResponses)) { + addResponses(bulkItemResponses, (r -> !r.isFailed())); + retry(createBulkRequestForRetry(bulkItemResponses)); + } else { + addResponses(bulkItemResponses, (r -> true)); + finishHim(); + } + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.onFailure(e); + } finally { + FutureUtils.cancel(scheduledRequestFuture); + } + } + + private void retry(BulkRequest bulkRequestForRetry) { + assert backoff.hasNext(); + TimeValue next = backoff.next(); + logger.trace("Retry of bulk request scheduled in {} ms.", next.millis()); + scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry))); + } + + private BulkRequest 
createBulkRequestForRetry(BulkResponse bulkItemResponses) { + BulkRequest requestToReissue = new BulkRequest(); + int index = 0; + for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) { + if (bulkItemResponse.isFailed()) { + requestToReissue.add(currentBulkRequest.requests().get(index)); + } + index++; + } + return requestToReissue; + } + + private boolean canRetry(BulkResponse bulkItemResponses) { + if (!backoff.hasNext()) { + return false; + } + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + if (bulkItemResponse.isFailed()) { + Throwable cause = bulkItemResponse.getFailure().getCause(); + Throwable rootCause = ExceptionsHelper.unwrapCause(cause); + if (!rootCause.getClass().equals(retryOnThrowable)) { + return false; + } + } + } + return true; + } + + private void finishHim() { + try { + listener.onResponse(getAccumulatedResponse()); + } finally { + FutureUtils.cancel(scheduledRequestFuture); + } + } + + private void addResponses(BulkResponse response, Predicate<BulkItemResponse> filter) { + for (BulkItemResponse bulkItemResponse : response) { + if (filter.test(bulkItemResponse)) { + // Use client-side lock here to avoid visibility issues. This method may be called multiple times + // (based on how many retries we have to issue) and relying that the response handling code will be + // scheduled on the same thread is fragile. 
+ synchronized (responses) { + responses.add(bulkItemResponse); + } + } + } + } + + private BulkResponse getAccumulatedResponse() { + BulkItemResponse[] itemResponses; + synchronized (responses) { + itemResponses = responses.toArray(new BulkItemResponse[1]); + } + long stopTimestamp = System.nanoTime(); + long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis(); + return new BulkResponse(itemResponses, totalLatencyMs); + } + + public void execute(BulkRequest bulkRequest) { + this.currentBulkRequest = bulkRequest; + client.bulk(bulkRequest, this); + } + } + + static class AsyncRetryHandler extends AbstractRetryHandler { + public AsyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) { + super(retryOnThrowable, backoffPolicy, client, listener); + } + } + + static class SyncRetryHandler extends AbstractRetryHandler { + private final PlainActionFuture<BulkResponse> actionFuture; + + public static SyncRetryHandler create(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client) { + PlainActionFuture<BulkResponse> actionFuture = PlainActionFuture.newFuture(); + return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture); + } + + public SyncRetryHandler(Class<? 
extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture<BulkResponse> actionFuture) { + super(retryOnThrowable, backoffPolicy, client, actionFuture); + this.actionFuture = actionFuture; + } + + public ActionFuture<BulkResponse> executeBlocking(BulkRequest bulkRequest) { + super.execute(bulkRequest); + return actionFuture; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java index 68bcdc1503..1d29e6c397 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.percolate; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; @@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; -import org.elasticsearch.common.text.Text; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.threadpool.ThreadPool; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 442b0915e3..52d45ec940 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -473,6 +473,14 @@ 
public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se return this; } + /** + * Should the query be profiled. Defaults to <code>false</code> + */ + public SearchRequestBuilder setProfile(boolean profile) { + sourceBuilder().profile(profile); + return this; + } + @Override public String toString() { if (request.source() != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 769e0978a7..e6681bf2b9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.List; +import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse; @@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent { this.scrollId = scrollId; } + /** + * If profiling was enabled, this returns an object containing the profile results from + * each shard. 
If profiling was not enabled, this will return null + * + * @return The profile results or null + */ + public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() { + return internalResponse.profile(); + } + static final class Fields { static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id"); static final XContentBuilderString TOOK = new XContentBuilderString("took"); diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index b73ee8a75f..5f2fb33e04 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -21,25 +21,30 @@ package org.elasticsearch.action.support; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * Helper for dealing with destructive operations and wildcard usage. */ -public final class DestructiveOperations extends AbstractComponent implements NodeSettingsService.Listener { +public final class DestructiveOperations extends AbstractComponent { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. 
*/ - public static final String REQUIRES_NAME = "action.destructive_requires_name"; + public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER); private volatile boolean destructiveRequiresName; @Inject - public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) { + public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) { super(settings); - destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false); - nodeSettingsService.addListener(this); + destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName); + } + + private void setDestructiveRequiresName(boolean destructiveRequiresName) { + this.destructiveRequiresName = destructiveRequiresName; } /** @@ -65,15 +70,6 @@ public final class DestructiveOperations extends AbstractComponent implements No } } - @Override - public void onRefreshSettings(Settings settings) { - boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName); - if (destructiveRequiresName != newValue) { - logger.info("updating [action.operate_all_indices] from [{}] to [{}]", destructiveRequiresName, newValue); - this.destructiveRequiresName = newValue; - } - } - private static boolean hasWildcardUsage(String aliasOrIndex) { return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1; } diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 8bcba8ad54..e8f4a0d83c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -19,8 +19,16 @@ package org.elasticsearch.action.support.broadcast.node; -import org.elasticsearch.action.*; -import org.elasticsearch.action.support.*; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.NoShardAvailableActionException; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; @@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.NodeShouldNotConnectException; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; @@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe 
e.setIndex(shardRouting.getIndex()); e.setShard(shardRouting.shardId()); shardResults[shardIndex] = e; - logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary()); + if (TransportActions.isShardNotAvailableException(t)) { + if (logger.isTraceEnabled()) { + logger.trace("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary()); + } + } else { + if (logger.isDebugEnabled()) { + logger.debug("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary()); + } + } } } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 26c439c0a3..d17cc02c5b 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -300,11 +300,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ @Override public void onFailure(Throwable t) { if (t instanceof RetryOnReplicaException) { - logger.trace("Retrying operation on replica, action [{}], request [{}]", t, actionName, request); + logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request); observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { - threadPool.executor(executor).execute(AsyncReplicaAction.this); + // Forking a thread on local node via transport service so that custom transport service have an + // opportunity to execute custom logic before the replica operation begins + String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]"; + TransportChannelResponseHandler<TransportResponse.Empty> handler = 
TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage); + transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 9f8b2a2e7b..d28ba2986e 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -245,7 +246,7 @@ public class UpdateHelper extends AbstractComponent { private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) { try { if (scriptService != null) { - ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request); + ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap()); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... 
diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 15def3b273..73eed43352 100644 --- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; @@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import 
org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse; @@ -391,6 +394,29 @@ public interface IndicesAdminClient extends ElasticsearchClient { FlushRequestBuilder prepareFlush(String... indices); /** + * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). + * + * @param request The sync flush request + * @return A result future + * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) + */ + ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request); + + /** + * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). + * + * @param request The sync flush request + * @param listener A listener to be notified with a result + * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) + */ + void syncedFlush(SyncedFlushRequest request, ActionListener <SyncedFlushResponse> listener); + + /** + * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). + */ + SyncedFlushRequestBuilder prepareSyncedFlush(String... indices); + + /** * Explicitly force merge one or more indices into a the number of segments. 
* * @param request The optimize request diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java index 7f0decaba5..063fd10dcf 100644 --- a/core/src/main/java/org/elasticsearch/client/Requests.java +++ b/core/src/main/java/org/elasticsearch/client/Requests.java @@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -131,7 +132,7 @@ public class Requests { public static SuggestRequest suggestRequest(String... indices) { return new SuggestRequest(indices); } - + /** * Creates a search request against one or more indices. Note, the search source must be set either using the * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}. @@ -266,6 +267,17 @@ public class Requests { } /** + * Creates a synced flush indices request. + * + * @param indices The indices to sync flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices + * @return The synced flush request + * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) + */ + public static SyncedFlushRequest syncedFlushRequest(String... indices) { + return new SyncedFlushRequest(indices); + } + + /** * Creates a force merge request. * * @param indices The indices to force merge. 
Use <tt>null</tt> or <tt>_all</tt> to execute against all indices diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 1b5e8539ac..ea57901f2b 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; @@ -1316,6 +1320,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client } @Override + public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) { + return execute(SyncedFlushAction.INSTANCE, request); + } + + @Override + public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) { + execute(SyncedFlushAction.INSTANCE, request, listener); + } + + @Override + public SyncedFlushRequestBuilder prepareSyncedFlush(String... 
indices) { + return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices); + } + + @Override public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) { execute(GetMappingsAction.INSTANCE, request, listener); } diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 33cf347941..3b8be668f4 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers; import org.elasticsearch.client.transport.support.TransportProxyClient; import org.elasticsearch.cluster.ClusterNameModule; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Module; @@ -43,19 +42,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.EnvironmentModule; import org.elasticsearch.indices.breaker.CircuitBreakerModule; import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.search.SearchModule; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; -import 
org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.netty.NettyTransport; @@ -69,7 +64,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; * The transport client allows to create a client that is not part of the cluster, but simply connects to one * or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}. * <p> - * The transport client important modules used is the {@link org.elasticsearch.transport.TransportModule} which is + * The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is * started in client mode (only connects, no bind). */ public class TransportClient extends AbstractClient { @@ -143,10 +138,9 @@ public class TransportClient extends AbstractClient { } modules.add(new PluginsModule(pluginsService)); modules.add(new SettingsModule(this.settings, settingsFilter )); - modules.add(new NetworkModule(networkService)); + modules.add(new NetworkModule(networkService, this.settings, true)); modules.add(new ClusterNameModule(this.settings)); modules.add(new ThreadPoolModule(threadPool)); - modules.add(new TransportModule(this.settings)); modules.add(new SearchModule() { @Override protected void configure() { @@ -154,7 +148,6 @@ public class TransportClient extends AbstractClient { } }); modules.add(new ActionModule(true)); - modules.add(new ClientTransportModule()); modules.add(new CircuitBreakerModule(this.settings)); pluginsService.processModules(modules); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 2b7786fdb1..43c616d799 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -19,9 +19,6 @@ package 
org.elasticsearch.cluster; -import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; @@ -29,7 +26,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateFilter; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; @@ -60,17 +56,15 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.InternalClusterService; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; -import org.elasticsearch.discovery.DiscoverySettings; -import 
org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.engine.EngineConfig; @@ -81,21 +75,13 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesWarmer; -import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.ttl.IndicesTTLService; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.internal.DefaultSearchContext; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; +import java.util.*; /** * Configures classes and services that affect the entire cluster. 
@@ -122,7 +108,6 @@ public class ClusterModule extends AbstractModule { SnapshotInProgressAllocationDecider.class)); private final Settings settings; - private final DynamicSettings.Builder clusterDynamicSettings = new DynamicSettings.Builder(); private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder(); private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class); private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class); @@ -134,7 +119,6 @@ public class ClusterModule extends AbstractModule { public ClusterModule(Settings settings) { this.settings = settings; - registerBuiltinClusterSettings(); registerBuiltinIndexSettings(); for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { @@ -144,70 +128,11 @@ public class ClusterModule extends AbstractModule { registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class); } - private void registerBuiltinClusterSettings() { - registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, Validator.EMPTY); - registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT); - registerClusterDynamicSetting(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ALLOCATION_ALLOW_REBALANCE_VALIDATOR); - 
registerClusterDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER); - registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY); - registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, Validator.EMPTY); - registerClusterDynamicSetting(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, Validator.BOOLEAN); - registerClusterDynamicSetting(DiscoverySettings.NO_MASTER_BLOCK, Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, Validator.EMPTY); - registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME); - registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME); - registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, Validator.TIME_NON_NEGATIVE); - 
registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR); - registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER); - registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, Validator.EMPTY); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, Validator.EMPTY); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, Validator.BOOLEAN); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, Validator.BOOLEAN); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, Validator.EMPTY); - registerClusterDynamicSetting(DestructiveOperations.REQUIRES_NAME, Validator.EMPTY); - registerClusterDynamicSetting(DiscoverySettings.PUBLISH_TIMEOUT, Validator.TIME_NON_NEGATIVE); - 
registerClusterDynamicSetting(DiscoverySettings.PUBLISH_DIFF_ENABLE, Validator.BOOLEAN); - registerClusterDynamicSetting(DiscoverySettings.COMMIT_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - registerClusterDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(SearchService.DEFAULT_SEARCH_TIMEOUT, Validator.TIMEOUT); - registerClusterDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE, Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE + ".*", Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE, Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY); - registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN); - registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER); - registerClusterDynamicSetting(TransportReplicationAction.SHARD_FAILURE_TIMEOUT, Validator.TIME_NON_NEGATIVE); - } private void registerBuiltinIndexSettings() { 
registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY); - registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.EMPTY); + registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER); registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY); registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY); registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY); @@ -272,9 +197,6 @@ public class ClusterModule extends AbstractModule { indexDynamicSettings.addSetting(setting, validator); } - public void registerClusterDynamicSetting(String setting, Validator validator) { - clusterDynamicSettings.addSetting(setting, validator); - } public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) { allocationDeciders.registerExtension(allocationDecider); @@ -290,7 +212,6 @@ public class ClusterModule extends AbstractModule { @Override protected void configure() { - bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings.build()); bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build()); // bind ShardsAllocator diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index e20f21b4ce..34ccfd3b43 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -129,7 +129,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> { @SuppressWarnings("unchecked") T proto = (T)customPrototypes.get(type); if (proto == null) { - throw new IllegalArgumentException("No custom state prototype 
registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins"); } return proto; } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index ab85d9540f..fb22c2ca36 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -38,6 +38,13 @@ public interface ClusterStateTaskExecutor<T> { } /** + * Callback invoked after new cluster state is published. Note that + * this method is not invoked if the cluster state was not updated. + */ + default void clusterStatePublished(ClusterState newClusterState) { + } + + /** * Represents the result of a batched execution of cluster state update tasks * @param <T> the type of the cluster state update task */ diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 039868d16c..925a5a12ed 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -37,11 +37,12 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.monitor.fs.FsInfo; -import org.elasticsearch.node.settings.NodeSettingsService; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ReceiveTimeoutTransportException; @@ -63,8 +64,8 @@ import java.util.concurrent.TimeUnit; */ public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { - public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval"; - public static final String INTERNAL_CLUSTER_INFO_TIMEOUT = "cluster.info.update.timeout"; + public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER); private volatile TimeValue updateFrequency; @@ -82,7 +83,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu private final List<Listener> listeners = new CopyOnWriteArrayList<>(); @Inject - public InternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService, + public InternalClusterInfoService(Settings settings, ClusterSettings clusterSettings, TransportNodesStatsAction transportNodesStatsAction, TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { @@ -95,10 +96,12 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.transportIndicesStatsAction = transportIndicesStatsAction; this.clusterService = clusterService; this.threadPool = threadPool; - this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30)); - this.fetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, TimeValue.timeValueSeconds(15)); - this.enabled = 
settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true); - nodeSettingsService.addListener(new ApplySettings()); + this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); + this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); + this.enabled = DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); + clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); + clusterSettings.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); // Add InternalClusterInfoService to listen for Master changes this.clusterService.add((LocalNodeMasterListener)this); @@ -106,35 +109,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.clusterService.add((ClusterStateListener)this); } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null); - // ClusterInfoService is only enabled if the DiskThresholdDecider is enabled - Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null); - - if (newUpdateFrequency != null) { - if (newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) { - logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency); - throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds"); - } else { - logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency); - 
InternalClusterInfoService.this.updateFrequency = newUpdateFrequency; - } - } - - TimeValue newFetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, null); - if (newFetchTimeout != null) { - logger.info("updating fetch timeout [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_TIMEOUT, fetchTimeout, newFetchTimeout); - InternalClusterInfoService.this.fetchTimeout = newFetchTimeout; - } + private void setEnabled(boolean enabled) { + this.enabled = enabled; + } + private void setFetchTimeout(TimeValue fetchTimeout) { + this.fetchTimeout = fetchTimeout; + } - // We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable - if (newEnabled != null) { - InternalClusterInfoService.this.enabled = newEnabled; - } - } + void setUpdateFrequency(TimeValue updateFrequency) { + this.updateFrequency = updateFrequency; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index e3925aa6f4..9e57fe3a48 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -26,11 +26,12 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.concurrent.TimeoutException; @@ -40,30 +41,23 @@ import java.util.concurrent.TimeoutException; */ public 
class MappingUpdatedAction extends AbstractComponent { - public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout"; + public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue current = MappingUpdatedAction.this.dynamicMappingUpdateTimeout; - TimeValue newValue = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, current); - if (!current.equals(newValue)) { - logger.info("updating " + INDICES_MAPPING_DYNAMIC_TIMEOUT + " from [{}] to [{}]", current, newValue); - MappingUpdatedAction.this.dynamicMappingUpdateTimeout = newValue; - } - } - } - @Inject - public MappingUpdatedAction(Settings settings, NodeSettingsService nodeSettingsService) { + public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.dynamicMappingUpdateTimeout = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, TimeValue.timeValueSeconds(30)); - nodeSettingsService.addListener(new ApplySettings()); + this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); } + private void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) { + this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout; + } + + public void setClient(Client client) { this.client = client.admin().indices(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 
d09df094a6..a04a6d7bd5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -20,7 +20,12 @@ package org.elasticsearch.cluster.action.shard; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; @@ -37,19 +42,24 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; -/** - * - */ -public class ShardStateAction extends AbstractComponent { +public class ShardStateAction extends AbstractComponent { public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started"; public static final 
String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure"; @@ -97,65 +107,68 @@ public class ShardStateAction extends AbstractComponent { options = TransportRequestOptions.builder().withTimeout(timeout).build(); } transportService.sendRequest(masterNode, - SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty response) { + listener.onSuccess(); + } + + @Override + public void handleException(TransportException exp) { + logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry); + listener.onShardFailedFailure(masterNode, exp); + } + }); + } + + private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> { + @Override + public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { + handleShardFailureOnMaster(request, new ClusterStateTaskListener() { @Override - public void handleResponse(TransportResponse.Empty response) { - listener.onSuccess(); + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting); + try { + channel.sendResponse(t); + } catch (Throwable channelThrowable) { + logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting); + } } @Override - public void handleException(TransportException exp) { - logger.warn("failed to send failed shard to {}", exp, masterNode); - listener.onShardFailedFailure(masterNode, exp); + public void onNoLongerMaster(String source) { + logger.error("no longer master while failing shard [{}]", request.shardRouting); + try { + channel.sendResponse(new NotMasterException(source)); + } catch (Throwable channelThrowable) { 
+ logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting); + } } - }); - } - - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { - DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); - if (masterNode == null) { - logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting); - return; - } - shardStarted(shardRouting, indexUUID, reason, masterNode); - } - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { - ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null); - logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); - transportService.sendRequest(masterNode, - SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { @Override - public void handleException(TransportException exp) { - logger.warn("failed to send shard started to [{}]", exp, masterNode); + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Throwable channelThrowable) { + logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting); + } } - - }); - } - - private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); - - private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { - logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); - clusterService.submitStateUpdateTask( - "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + 
shardRoutingEntry.message + "]", - shardRoutingEntry, - ClusterStateTaskConfig.build(Priority.HIGH), - shardFailedClusterStateHandler, - shardFailedClusterStateHandler); + } + ); + } } - class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener { + class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry> { @Override public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception { BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder(); - List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size()); + List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>(tasks.size()); for (ShardRoutingEntry task : tasks) { - shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)); + failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)); } ClusterState maybeUpdatedState = currentState; try { - RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied); + RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards); if (result.changed()) { maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); } @@ -167,31 +180,57 @@ public class ShardStateAction extends AbstractComponent { } @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) { - logger.trace("unassigned shards after shard failures. 
scheduling a reroute."); - routingService.reroute("unassigned shards after shard failures, scheduling a reroute"); + public void clusterStatePublished(ClusterState newClusterState) { + int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size(); + if (numberOfUnassignedShards > 0) { + String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); + if (logger.isTraceEnabled()) { + logger.trace(reason + ", scheduling a reroute"); } + routingService.reroute(reason); + } } + } - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); - } + private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); + + private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) { + logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); + clusterService.submitStateUpdateTask( + "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", + shardRoutingEntry, + ClusterStateTaskConfig.build(Priority.HIGH), + shardFailedClusterStateHandler, + listener); } - private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = - new ShardStartedClusterStateHandler(); + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { + DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); + if (masterNode == null) { + logger.warn("{} can't send shard started for {}, no master known.", shardRouting.shardId(), shardRouting); + return; + } + shardStarted(shardRouting, indexUUID, reason, masterNode); + } - private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { - logger.debug("received shard started for {}", shardRoutingEntry); + 
public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { + ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null); + logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); + transportService.sendRequest(masterNode, + SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleException(TransportException exp) { + logger.warn("failed to send shard started to [{}]", exp, masterNode); + } + }); + } - clusterService.submitStateUpdateTask( - "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", - shardRoutingEntry, - ClusterStateTaskConfig.build(Priority.URGENT), - shardStartedClusterStateHandler, - shardStartedClusterStateHandler); + class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> { + @Override + public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { + handleShardStartedOnMaster(request); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } } class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener { @@ -223,26 +262,20 @@ public class ShardStateAction extends AbstractComponent { } } - private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> { + private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler(); - @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - handleShardFailureOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - class ShardStartedTransportHandler implements 
TransportRequestHandler<ShardRoutingEntry> { + private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { + logger.debug("received shard started for {}", shardRoutingEntry); - @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - shardStartedOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } + clusterService.submitStateUpdateTask( + "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", + shardRoutingEntry, + ClusterStateTaskConfig.build(Priority.URGENT), + shardStartedClusterStateHandler, + shardStartedClusterStateHandler); } public static class ShardRoutingEntry extends TransportRequest { - ShardRouting shardRouting; String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; String message; @@ -283,8 +316,13 @@ public class ShardStateAction extends AbstractComponent { } public interface Listener { - default void onSuccess() {} - default void onShardFailedNoMaster() {} - default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {} + default void onSuccess() { + } + + default void onShardFailedNoMaster() { + } + + default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) { + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 669d71477c..93961bf1fb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -621,7 +621,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild public int numberOfReplicas() { return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1); } - + public Builder creationDate(long creationDate) { settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, 
creationDate).build(); return this; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 751f8a09ea..55cb8a5d94 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -40,8 +40,8 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.support.LoggerMessageFormat; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.loader.SettingsLoader; import org.elasticsearch.common.xcontent.FromXContentBuilder; @@ -134,13 +134,13 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr //noinspection unchecked T proto = (T) customPrototypes.get(type); if (proto == null) { - throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); + throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins"); } return proto; } - public static final String SETTING_READ_ONLY = "cluster.blocks.read_only"; + public static final Setting<Boolean> SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER); public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); @@ -745,23 +745,23 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr /** All known byte-sized cluster settings. 
*/ public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet( - IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, - RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC)); + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey())); /** All known time cluster settings. */ public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet( - IndicesTTLService.INDICES_TTL_INTERVAL, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, - RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, - RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, - RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, - RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, - DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, - InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, - DiscoverySettings.PUBLISH_TIMEOUT, - InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD)); + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), + DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey())); /** As of 2.0 we require 
units for time and byte-sized settings. This methods adds default units to any cluster settings that don't * specify a unit. */ diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 1fa1b702f6..b38e99d449 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Locale; /** * Service responsible for submitting open/close index requests @@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent { } if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { - IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index); - for (IndexShardRoutingTable shard : indexRoutingTable) { - for (ShardRouting shardRouting : shard) { - if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) { - throw new IndexPrimaryShardNotAllocatedException(new Index(index)); - } - } - } indicesToClose.add(index); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 957125703b..8093d93ccc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import 
org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.percolator.PercolatorService; @@ -237,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent { } private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { - Map<String, DocumentMapper> newMappers = new HashMap<>(); - Map<String, DocumentMapper> existingMappers = new HashMap<>(); + String mappingType = request.type(); + CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); for (String index : request.indices()) { IndexService indexService = indicesService.indexServiceSafe(index); // try and parse it (no need to add it here) so we can bail early in case of parsing exception @@ -246,16 +245,13 @@ public class MetaDataMappingService extends AbstractComponent { DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); if (MapperService.DEFAULT_MAPPING.equals(request.type())) { // _default_ types do not go through merging, but we do test the new settings. 
Also don't apply the old default - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false); + newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false); } else { - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); + newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null); if (existingMapper != null) { // first, simulate - MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); - // if we have conflicts, throw an exception - if (mergeResult.hasConflicts()) { - throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}"); - } + // this will just throw exceptions in case of problems + existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); } else { // TODO: can we find a better place for this validation? 
// The reason this validation is here is that the mapper service doesn't learn about @@ -274,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent { } } } - newMappers.put(index, newMapper); - if (existingMapper != null) { - existingMappers.put(index, existingMapper); + if (mappingType == null) { + mappingType = newMapper.type(); + } else if (mappingType.equals(newMapper.type()) == false) { + throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); } } + assert mappingType != null; - String mappingType = request.type(); - if (mappingType == null) { - mappingType = newMappers.values().iterator().next().type(); - } else if (!mappingType.equals(newMappers.values().iterator().next().type())) { - throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); - } if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } final Map<String, MappingMetaData> mappings = new HashMap<>(); - for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) { - String index = entry.getKey(); + for (String index : request.indices()) { // do the actual merge here on the master, and update the mapping source - DocumentMapper newMapper = entry.getValue(); IndexService indexService = indicesService.indexService(index); if (indexService == null) { continue; } CompressedXContent existingSource = null; - if (existingMappers.containsKey(entry.getKey())) { - existingSource = existingMappers.get(entry.getKey()).mappingSource(); + DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType); + if (existingMapper != null) { + existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), 
newMapper.mappingSource(), false, request.updateAllTypes()); + DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes()); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { @@ -322,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent { } else { mappings.put(index, new MappingMetaData(mergedMapper)); if (logger.isDebugEnabled()) { - logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource); + logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] create_mapping [{}]", index, newMapper.type()); + logger.info("[{}] create_mapping [{}]", index, mappingType); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index badf70a191..8dd980c8bb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -21,12 +21,12 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; @@ -34,7 +34,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -85,7 +84,7 @@ 
public class RoutingNodes implements Iterable<RoutingNode> { Map<String, List<ShardRouting>> nodesToShards = new HashMap<>(); // fill in the nodeToShards with the "live" nodes for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) { - nodesToShards.put(cursor.value.id(), new ArrayList<ShardRouting>()); + nodesToShards.put(cursor.value.id(), new ArrayList<>()); } // fill in the inverse of node -> shards allocated @@ -98,21 +97,13 @@ public class RoutingNodes implements Iterable<RoutingNode> { // by the ShardId, as this is common for primary and replicas. // A replica Set might have one (and not more) replicas with the state of RELOCATING. if (shard.assignedToNode()) { - List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId()); - if (entries == null) { - entries = new ArrayList<>(); - nodesToShards.put(shard.currentNodeId(), entries); - } + List<ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>()); final ShardRouting sr = getRouting(shard, readOnly); entries.add(sr); assignedShardsAdd(sr); if (shard.relocating()) { - entries = nodesToShards.get(shard.relocatingNodeId()); relocatingShards++; - if (entries == null) { - entries = new ArrayList<>(); - nodesToShards.put(shard.relocatingNodeId(), entries); - } + entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>()); // add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from. 
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard(); @@ -128,7 +119,7 @@ public class RoutingNodes implements Iterable<RoutingNode> { inactiveShardCount++; } } else { - final ShardRouting sr = getRouting(shard, readOnly); + final ShardRouting sr = getRouting(shard, readOnly); assignedShardsAdd(sr); unassignedShards.add(sr); } @@ -456,12 +447,8 @@ public class RoutingNodes implements Iterable<RoutingNode> { // no unassigned return; } - List<ShardRouting> shards = assignedShards.get(shard.shardId()); - if (shards == null) { - shards = new ArrayList<>(); - assignedShards.put(shard.shardId(), shards); - } - assert assertInstanceNotInList(shard, shards); + List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>()); + assert assertInstanceNotInList(shard, shards); shards.add(shard); } @@ -671,7 +658,7 @@ public class RoutingNodes implements Iterable<RoutingNode> { } public void shuffle() { - Collections.shuffle(unassigned); + Randomness.shuffle(unassigned); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 8dd71e3fba..5ffaee0f2f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent { return shardIdentifier; } - public boolean allocatedPostIndexCreate() { + public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) { if (active()) { return true; } @@ -279,6 +281,11 @@ public final class 
ShardRouting implements Streamable, ToXContent { return false; } + if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) { + // when no shards with this id have ever been active for this index + return false; + } + return true; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index feafb76a5f..2268bf1d99 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.Set; +import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b9ce532a61..e6dc9a65ef 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -34,12 +34,13 @@ import 
org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.ArrayList; import java.util.Collection; @@ -72,42 +73,32 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final String SETTING_THRESHOLD = "cluster.routing.allocation.balance.threshold"; - public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index"; - public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard"; - - private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f; - private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance); - final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance); - float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold); - if (threshold <= 0.0f) { - throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); - } 
- BalancedShardsAllocator.this.threshold = threshold; - BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance); - } - } - - private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR); - - private volatile float threshold = 1.0f; + public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER); + public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER); + public static final Setting<Float> THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER); + private volatile WeightFunction weightFunction; + private volatile float threshold; public BalancedShardsAllocator(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public BalancedShardsAllocator(Settings settings, NodeSettingsService nodeSettingsService) { + public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { super(settings); - ApplySettings applySettings = new ApplySettings(); - applySettings.onRefreshSettings(settings); - nodeSettingsService.addListener(applySettings); + setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setThreshold(THRESHOLD_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); + clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + } + + private void setWeightFunction(float indexBalance, float shardBalanceFactor) { + weightFunction = new WeightFunction(indexBalance, 
shardBalanceFactor); + } + + private void setThreshold(float threshold) { + this.threshold = threshold; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 6f7bbac8ae..a66c8ddaef 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -24,10 +24,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.HashMap; import java.util.Map; @@ -76,37 +77,12 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES = "cluster.routing.allocation.awareness.attributes"; - public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP = "cluster.routing.allocation.awareness.force."; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null); - if (awarenessAttributes == null && "".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) { - awarenessAttributes = 
Strings.EMPTY_ARRAY; // the empty string resets this - } - if (awarenessAttributes != null) { - logger.info("updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes); - AwarenessAllocationDecider.this.awarenessAttributes = awarenessAttributes; - } - Map<String, String[]> forcedAwarenessAttributes = new HashMap<>(AwarenessAllocationDecider.this.forcedAwarenessAttributes); - Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP); - if (!forceGroups.isEmpty()) { - for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) { - String[] aValues = entry.getValue().getAsArray("values"); - if (aValues.length > 0) { - forcedAwarenessAttributes.put(entry.getKey(), aValues); - } - } - } - AwarenessAllocationDecider.this.forcedAwarenessAttributes = forcedAwarenessAttributes; - } - } + public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); + public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER); private String[] awarenessAttributes; - private Map<String, String[]> forcedAwarenessAttributes; + private volatile Map<String, String[]> forcedAwarenessAttributes; /** * Creates a new {@link AwarenessAllocationDecider} instance @@ -121,24 +97,28 @@ public class AwarenessAllocationDecider extends AllocationDecider { * @param settings {@link Settings} to use */ public AwarenessAllocationDecider(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public AwarenessAllocationDecider(Settings settings, 
NodeSettingsService nodeSettingsService) { + public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES); + this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); + setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes); + } - forcedAwarenessAttributes = new HashMap<>(); - Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP); + private void setForcedAwarenessAttributes(Settings forceSettings) { + Map<String, String[]> forcedAwarenessAttributes = new HashMap<>(); + Map<String, Settings> forceGroups = forceSettings.getAsGroups(); for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) { String[] aValues = entry.getValue().getAsArray("values"); if (aValues.length > 0) { forcedAwarenessAttributes.put(entry.getKey(), aValues); } } - - nodeSettingsService.addListener(new ApplySettings()); + this.forcedAwarenessAttributes = forcedAwarenessAttributes; } /** @@ -150,6 +130,10 @@ public class AwarenessAllocationDecider extends AllocationDecider { return this.awarenessAttributes; } + private void setAwarenessAttributes(String[] awarenessAttributes) { + this.awarenessAttributes = awarenessAttributes; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return underCapacity(shardRouting, node, allocation, true); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java 
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 7638c7aeee..b1be2a6fce 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -19,13 +19,12 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.Locale; @@ -38,10 +37,10 @@ import java.util.Locale; * <ul> * <li><tt>indices_primaries_active</tt> - Re-balancing is allowed only once all * primary shards on all indices are active.</li> - * + * * <li><tt>indices_all_active</tt> - Re-balancing is allowed only once all * shards on all indices are active.</li> - * + * * <li><tt>always</tt> - Re-balancing is allowed once a shard replication group * is active</li> * </ul> @@ -49,19 +48,10 @@ import java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - - public static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance"; - public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = (setting, value, clusterState) -> { - try { - ClusterRebalanceType.parseString(value); - return null; - } catch (IllegalArgumentException e) { - return "the value of " + setting + " must be one of: [always, 
indices_primaries_active, indices_all_active]"; - } - }; + public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER); /** - * An enum representation for the configured re-balance type. + * An enum representation for the configured re-balance type. */ public static enum ClusterRebalanceType { /** @@ -73,7 +63,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { */ INDICES_PRIMARIES_ACTIVE, /** - * Re-balancing is allowed only once all shards on all indices are active. + * Re-balancing is allowed only once all shards on all indices are active. */ INDICES_ALL_ACTIVE; @@ -85,48 +75,28 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { } else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) { return ClusterRebalanceType.INDICES_ALL_ACTIVE; } - throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); + throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString); } } - private ClusterRebalanceType type; + private volatile ClusterRebalanceType type; @Inject - public ClusterRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active"); try { - type = ClusterRebalanceType.parseString(allowRebalance); + type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings); } catch (IllegalStateException e) { - logger.warn("[{}] has a wrong value {}, 
defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance); + logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings)); type = ClusterRebalanceType.INDICES_ALL_ACTIVE; } - logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type.toString().toLowerCase(Locale.ROOT)); + logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT)); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); } - class ApplySettings implements NodeSettingsService.Listener { - - @Override - public void onRefreshSettings(Settings settings) { - String newAllowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, null); - if (newAllowRebalance != null) { - ClusterRebalanceType newType = null; - try { - newType = ClusterRebalanceType.parseString(newAllowRebalance); - } catch (IllegalArgumentException e) { - // ignore - } - - if (newType != null && newType != ClusterRebalanceAllocationDecider.this.type) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, - ClusterRebalanceAllocationDecider.this.type.toString().toLowerCase(Locale.ROOT), - newType.toString().toLowerCase(Locale.ROOT)); - ClusterRebalanceAllocationDecider.this.type = newType; - } - } - } + private void setType(ClusterRebalanceType type) { + this.type = type; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 6bd1b437ac..a9ad35fd52 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -22,8 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * Similar to the {@link ClusterRebalanceAllocationDecider} this @@ -41,27 +42,19 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "concurrent_rebalance"; - public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance); - if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) { - logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance); - ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance; - } - } - } - + public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = 
Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER); private volatile int clusterConcurrentRebalance; @Inject - public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2); + this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings); logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance); + } + + private void setClusterConcurrentRebalance(int concurrentRebalance) { + clusterConcurrentRebalance = concurrentRebalance; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index a02c72c574..68fd7f3db9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -22,26 +22,27 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.DiskUsage; 
import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.Set; @@ -80,53 +81,11 @@ public class DiskThresholdDecider extends AllocationDecider { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - public static final String CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED = "cluster.routing.allocation.disk.threshold_enabled"; - public static final String CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.low"; - public static final String CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.high"; - public static final String CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS = "cluster.routing.allocation.disk.include_relocations"; - public static final String CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL = "cluster.routing.allocation.disk.reroute_interval"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null); - String 
newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null); - Boolean newRelocationsSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, null); - Boolean newEnableSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null); - TimeValue newRerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, null); - - if (newEnableSetting != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, - DiskThresholdDecider.this.enabled, newEnableSetting); - DiskThresholdDecider.this.enabled = newEnableSetting; - } - if (newRelocationsSetting != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, - DiskThresholdDecider.this.includeRelocations, newRelocationsSetting); - DiskThresholdDecider.this.includeRelocations = newRelocationsSetting; - } - if (newLowWatermark != null) { - if (!validWatermarkSetting(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse low watermark [{}]", newLowWatermark); - } - logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark); - DiskThresholdDecider.this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(newLowWatermark); - DiskThresholdDecider.this.freeBytesThresholdLow = thresholdBytesFromWatermark(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK); - } - if (newHighWatermark != null) { - if (!validWatermarkSetting(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse high watermark [{}]", newHighWatermark); - } - logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark); - DiskThresholdDecider.this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(newHighWatermark); - 
DiskThresholdDecider.this.freeBytesThresholdHigh = thresholdBytesFromWatermark(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK); - } - if (newRerouteInterval != null) { - logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, newRerouteInterval); - DiskThresholdDecider.this.rerouteInterval = newRerouteInterval; - } - } - } + public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER); + public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER); + public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER); + public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);; + public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); /** * Listens for a node to go over the high watermark and kicks off an empty @@ -231,38 +190,49 @@ public class DiskThresholdDecider extends AllocationDecider { // It's okay the Client is null here, because the empty cluster info // service will never actually call the listener where the client is // needed. 
Also this constructor is only used for tests - this(settings, new NodeSettingsService(settings), EmptyClusterInfoService.INSTANCE, null); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), EmptyClusterInfoService.INSTANCE, null); } @Inject - public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService, ClusterInfoService infoService, Client client) { + public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings, ClusterInfoService infoService, Client client) { super(settings); - String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%"); - String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%"); + final String lowWatermark = CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings); + final String highWatermark = CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.get(settings); + setHighWatermark(highWatermark); + setLowWatermark(lowWatermark); + this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings); + this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings); + this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); + infoService.addListener(new DiskListener(client)); + } - if 
(!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark); - } - if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) { - throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark); - } - // Watermark is expressed in terms of used data, but we need "free" data watermark - this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark); - this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark); + private void setIncludeRelocations(boolean includeRelocations) { + this.includeRelocations = includeRelocations; + } - this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK); - this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK); - this.includeRelocations = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true); - this.rerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60)); + private void setRerouteInterval(TimeValue rerouteInterval) { + this.rerouteInterval = rerouteInterval; + } - this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true); - nodeSettingsService.addListener(new ApplySettings()); - infoService.addListener(new DiskListener(client)); + private void setEnabled(boolean enabled) { + this.enabled = enabled; } - // For Testing - ApplySettings newApplySettings() { - return new ApplySettings(); + private void setLowWatermark(String lowWatermark) { + // Watermark is expressed in terms of used data, but we need "free" data watermark + this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark); + this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, 
CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()); + } + + private void setHighWatermark(String highWatermark) { + // Watermark is expressed in terms of used data, but we need "free" data watermark + this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark); + this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()); } // For Testing @@ -360,7 +330,8 @@ public class DiskThresholdDecider extends AllocationDecider { } // a flag for whether the primary shard has been previously allocated - boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(); + IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex()); + boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData); // checks for exact byte comparisons if (freeBytes < freeBytesThresholdLow.bytes()) { @@ -580,20 +551,21 @@ public class DiskThresholdDecider extends AllocationDecider { /** * Checks if a watermark string is a valid percentage or byte size value, - * returning true if valid, false if invalid. 
+ * @return the watermark value given */ - public boolean validWatermarkSetting(String watermark, String settingName) { + public static String validWatermarkSetting(String watermark, String settingName) { try { RatioValue.parseRatioValue(watermark); - return true; } catch (ElasticsearchParseException e) { try { ByteSizeValue.parseBytesSizeValue(watermark, settingName); - return true; } catch (ElasticsearchParseException ex) { - return false; + ex.addSuppressed(e); + throw ex; } } + return watermark; + } private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index 0bbd493504..a31d36db34 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -19,18 +19,20 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.Locale; /** - * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE} / - * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. 
+ * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / + * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. * The per index settings overrides the cluster wide setting. * * <p> @@ -54,26 +56,34 @@ import java.util.Locale; * @see Rebalance * @see Allocation */ -public class EnableAllocationDecider extends AllocationDecider implements NodeSettingsService.Listener { +public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final String CLUSTER_ROUTING_ALLOCATION_ENABLE = "cluster.routing.allocation.enable"; - public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable"; + public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER); + public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable"; - public static final String CLUSTER_ROUTING_REBALANCE_ENABLE = "cluster.routing.rebalance.enable"; + public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER); public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable"; private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; - @Inject - public EnableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.enableAllocation = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, 
Allocation.ALL.name())); - this.enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, Rebalance.ALL.name())); - nodeSettingsService.addListener(this); + this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); + this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); + } + + public void setEnableRebalance(Rebalance enableRebalance) { + this.enableRebalance = enableRebalance; + } + + public void setEnableAllocation(Allocation enableAllocation) { + this.enableAllocation = enableAllocation; } @Override @@ -82,8 +92,8 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored"); } - Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings(); - String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE); + IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex()); + String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE); final Allocation enable; if (enableIndexValue != null) { enable = Allocation.parse(enableIndexValue); @@ -96,7 +106,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe case NONE: return allocation.decision(Decision.NO, NAME, "no allocations are allowed"); case NEW_PRIMARIES: - if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) { + if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) { return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed"); } else { return allocation.decision(Decision.NO, NAME, "non-new 
primary allocations are forbidden"); @@ -148,25 +158,9 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe } } - @Override - public void onRefreshSettings(Settings settings) { - final Allocation enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation.name())); - if (enable != this.enableAllocation) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation, enable); - EnableAllocationDecider.this.enableAllocation = enable; - } - - final Rebalance enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance.name())); - if (enableRebalance != this.enableRebalance) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance, enableRebalance); - EnableAllocationDecider.this.enableRebalance = enableRebalance; - } - - } - /** * Allocation values or rather their string representation to be used used with - * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} + * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} * via cluster / index settings. */ public enum Allocation { @@ -192,7 +186,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe /** * Rebalance values or rather their string representation to be used used with - * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} + * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} * via cluster / index settings. 
*/ public enum Rebalance { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index e0e2caaf04..4c451e7fff 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -25,10 +25,9 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; - -import java.util.Map; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; @@ -65,36 +64,23 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include."; public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude."; - public static final String CLUSTER_ROUTING_REQUIRE_GROUP = "cluster.routing.allocation.require."; - public static final String CLUSTER_ROUTING_INCLUDE_GROUP = "cluster.routing.allocation.include."; - public static final String CLUSTER_ROUTING_EXCLUDE_GROUP = "cluster.routing.allocation.exclude."; + public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER); + public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = 
Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER); + public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; private volatile DiscoveryNodeFilters clusterExcludeFilters; @Inject - public FilterAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - Map<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); - if (requireMap.isEmpty()) { - clusterRequireFilters = null; - } else { - clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); - } - Map<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap(); - if (includeMap.isEmpty()) { - clusterIncludeFilters = null; - } else { - clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); - } - Map<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap(); - if (excludeMap.isEmpty()) { - clusterExcludeFilters = null; - } else { - clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); - } - nodeSettingsService.addListener(new ApplySettings()); + setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings)); + setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings)); + setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters); + 
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters); } @Override @@ -144,21 +130,13 @@ public class FilterAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - Map<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); - if (!requireMap.isEmpty()) { - clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); - } - Map<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap(); - if (!includeMap.isEmpty()) { - clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); - } - Map<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap(); - if (!excludeMap.isEmpty()) { - clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); - } - } + private void setClusterRequireFilters(Settings settings) { + clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, settings.getAsMap()); + } + private void setClusterIncludeFilters(Settings settings) { + clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap()); + } + private void setClusterExcludeFilters(Settings settings) { + clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap()); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 3d68ed50d2..9149d04cf6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -24,16 +24,16 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * This {@link AllocationDecider} limits the number of shards per node on a per * index or node-wide basis. The allocator prevents a single node to hold more * than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and - * {@value #CLUSTER_TOTAL_SHARDS_PER_NODE} globally during the allocation + * <tt>cluster.routing.allocation.total_shards_per_node</tt> globally during the allocation * process. The limits of this decider can be changed in real-time via a the * index settings API. * <p> @@ -64,26 +64,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number of shards per node on a global level. * Negative values are interpreted as unlimited. 
*/ - public static final String CLUSTER_TOTAL_SHARDS_PER_NODE = "cluster.routing.allocation.total_shards_per_node"; + public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER); - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - Integer newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, null); - - if (newClusterLimit != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE, - ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit); - ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit; - } - } - } @Inject - public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, -1); - nodeSettingsService.addListener(new ApplySettings()); + this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + } + + private void setClusterShardLimit(int clusterShardLimit) { + this.clusterShardLimit = clusterShardLimit; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index 37b9f9f461..597f0add8d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -23,9 +23,10 @@ import 
org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -38,18 +39,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { /** * Disables relocation of shards that are currently being snapshotted. */ - public static final String CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED = "cluster.routing.allocation.snapshot.relocation_enabled"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - boolean newEnableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); - if (newEnableRelocation != enableRelocation) { - logger.info("updating [{}] from [{}], to [{}]", CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation, newEnableRelocation); - enableRelocation = newEnableRelocation; - } - } - } + public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER); private volatile boolean enableRelocation = false; @@ -66,14 +56,18 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { * @param settings {@link org.elasticsearch.common.settings.Settings} to use */ public SnapshotInProgressAllocationDecider(Settings settings) { - this(settings, new NodeSettingsService(settings)); + 
this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public SnapshotInProgressAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - enableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); - nodeSettingsService.addListener(new ApplySettings()); + enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation); + } + + private void setEnableRelocation(boolean enableRelocation) { + this.enableRelocation = enableRelocation; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index ed6814d83a..b97e613867 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -19,13 +19,14 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * {@link 
ThrottlingAllocationDecider} controls the recovery process per node in @@ -47,27 +48,33 @@ import org.elasticsearch.node.settings.NodeSettingsService; */ public class ThrottlingAllocationDecider extends AllocationDecider { + public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; + public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; public static final String NAME = "throttling"; - - public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries"; - public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries"; public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; - public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; - public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; + public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER); + public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER); private volatile int primariesInitialRecoveries; private volatile int concurrentRecoveries; @Inject - public ThrottlingAllocationDecider(Settings settings, NodeSettingsService 
nodeSettingsService) { + public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - - this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES); - this.concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)); + this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings); + this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings); logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries); + } - nodeSettingsService.addListener(new ApplySettings()); + private void setConcurrentRecoveries(int concurrentRecoveries) { + this.concurrentRecoveries = concurrentRecoveries; + } + + private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) { + this.primariesInitialRecoveries = primariesInitialRecoveries; } @Override @@ -115,21 +122,4 @@ public class ThrottlingAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries); } } - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int primariesInitialRecoveries = 
settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries); - if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) { - logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries); - ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries; - } - - int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries); - if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) { - logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries); - ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries; - } - } - } } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index d4b1586184..5fc013b663 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -20,8 +20,19 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.Version; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.AckedClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Builder; +import org.elasticsearch.cluster.ClusterStateListener; +import 
org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; @@ -38,20 +49,39 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.*; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.*; -import 
java.util.concurrent.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -62,8 +92,8 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService { - public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold"; - public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; + public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; private final ThreadPool threadPool; @@ -74,7 +104,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe private final TransportService transportService; - private final NodeSettingsService nodeSettingsService; + private final ClusterSettings clusterSettings; private final DiscoveryNodeService discoveryNodeService; private final Version version; @@ -107,33 +137,32 @@ public class InternalClusterService 
extends AbstractLifecycleComponent<ClusterSe @Inject public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService, - NodeSettingsService nodeSettingsService, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) { + ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) { super(settings); this.operationRouting = operationRouting; this.transportService = transportService; this.discoveryService = discoveryService; this.threadPool = threadPool; - this.nodeSettingsService = nodeSettingsService; + this.clusterSettings = clusterSettings; this.discoveryNodeService = discoveryNodeService; this.version = version; // will be replaced on doStart. this.clusterState = ClusterState.builder(clusterName).build(); - this.nodeSettingsService.setClusterService(this); - this.nodeSettingsService.addListener(new ApplySettings()); + this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold); - this.reconnectInterval = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL, TimeValue.timeValueSeconds(10)); + this.reconnectInterval = CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING.get(settings); - this.slowTaskLoggingThreshold = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, TimeValue.timeValueSeconds(30)); + this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); localNodeMasterListeners = new LocalNodeMasterListeners(threadPool); initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock()); } - public NodeSettingsService settingsService() { - return this.nodeSettingsService; + private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { + 
this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; } @Override @@ -292,6 +321,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe if (config.timeout() != null) { updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> { if (updateTask.processed.getAndSet(true) == false) { + logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout()); listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); }})); } else { @@ -327,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe timeInQueue = 0; } - pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue, pending.executing)); + pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new Text(source), timeInQueue, pending.executing)); } return pendingClusterTasks; } @@ -413,6 +443,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe } assert batchResult.executionResults != null; + assert batchResult.executionResults.size() == toExecute.size() + : String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? 
"" : "s", batchResult.executionResults.size()); + boolean assertsEnabled = false; + assert (assertsEnabled = true); + if (assertsEnabled) { + for (UpdateTask<T> updateTask : toExecute) { + assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]"; + } + } ClusterState newClusterState = batchResult.resultingState; final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>(); @@ -421,7 +460,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString(); final ClusterStateTaskExecutor.TaskResult executionResult = batchResult.executionResults.get(updateTask.task); - executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); + executionResult.handle( + () -> proccessedListeners.add(updateTask), + ex -> { + logger.debug("cluster state update task [{}] failed", ex, updateTask.source); + updateTask.listener.onFailure(updateTask.source, ex); + } + ); } if (previousClusterState == newClusterState) { @@ -521,6 +566,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe // update the current cluster state clusterState = newClusterState; logger.debug("set local cluster state to version {}", newClusterState.version()); + try { + // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency + if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) { + final Settings incomingSettings = clusterChangedEvent.state().metaData().settings(); + clusterSettings.applySettings(incomingSettings); + } + } catch (Exception ex) { + logger.warn("failed to apply cluster settings", ex); + } for (ClusterStateListener listener : preAppliedListeners) { try { 
listener.clusterChanged(clusterChangedEvent); @@ -560,6 +614,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState); } + executor.clusterStatePublished(newClusterState); + TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID()); warnAboutSlowTaskIfNeeded(executionTime, source); @@ -846,12 +902,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe } } } - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final TimeValue slowTaskLoggingThreshold = settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, InternalClusterService.this.slowTaskLoggingThreshold); - InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; - } - } } diff --git a/core/src/main/java/org/elasticsearch/common/Booleans.java b/core/src/main/java/org/elasticsearch/common/Booleans.java index 6b1b9b016a..9c5f574663 100644 --- a/core/src/main/java/org/elasticsearch/common/Booleans.java +++ b/core/src/main/java/org/elasticsearch/common/Booleans.java @@ -84,7 +84,6 @@ public class Booleans { * throws exception if string cannot be parsed to boolean */ public static Boolean parseBooleanExact(String value) { - boolean isFalse = isExplicitFalse(value); if (isFalse) { return false; @@ -94,7 +93,7 @@ public class Booleans { return true; } - throw new IllegalArgumentException("value cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ] "); + throw new IllegalArgumentException("Failed to parse value [" + value + "] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]"); } public 
static Boolean parseBoolean(String value, Boolean defaultValue) { diff --git a/core/src/main/java/org/elasticsearch/common/Randomness.java b/core/src/main/java/org/elasticsearch/common/Randomness.java new file mode 100644 index 0000000000..dbfa8034b9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/Randomness.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import org.elasticsearch.common.settings.Settings; + +import java.lang.reflect.Method; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; + +/** + * Provides factory methods for producing reproducible sources of + * randomness. Reproducible sources of randomness contribute to + * reproducible tests. When running the Elasticsearch test suite, the + * test runner will establish a global random seed accessible via the + * system property "tests.seed". By seeding a random number generator + * with this global seed, we ensure that instances of Random produced + * with this class produce reproducible sources of randomness under + * when running under the Elasticsearch test suite. 
Alternatively, + * a reproducible source of randomness can be produced by providing a + * setting a reproducible seed. When running the Elasticsearch server + * process, non-reproducible sources of randomness are provided (unless + * a setting is provided for a module that exposes a seed setting (e.g., + * DiscoveryService#SETTING_DISCOVERY_SEED)). + */ +public final class Randomness { + private static final Method currentMethod; + private static final Method getRandomMethod; + + static { + Method maybeCurrentMethod; + Method maybeGetRandomMethod; + try { + Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext"); + maybeCurrentMethod = clazz.getMethod("current"); + maybeGetRandomMethod = clazz.getMethod("getRandom"); + } catch (Throwable t) { + maybeCurrentMethod = null; + maybeGetRandomMethod = null; + } + currentMethod = maybeCurrentMethod; + getRandomMethod = maybeGetRandomMethod; + } + + private Randomness() {} + + /** + * Provides a reproducible source of randomness seeded by a long + * seed in the settings with the key setting. + * + * @param settings the settings containing the seed + * @param setting the key to access the seed + * @return a reproducible source of randomness + */ + public static Random get(Settings settings, String setting) { + Long maybeSeed = settings.getAsLong(setting, null); + if (maybeSeed != null) { + return new Random(maybeSeed); + } else { + return get(); + } + } + + /** + * Provides a source of randomness that is reproducible when + * running under the Elasticsearch test suite, and otherwise + * produces a non-reproducible source of randomness. Reproducible + * sources of randomness are created when the system property + * "tests.seed" is set and the security policy allows reading this + * system property. Otherwise, non-reproducible sources of + * randomness are created. 
+ * + * @return a source of randomness + * @throws IllegalStateException if running tests but was not able + * to acquire an instance of Random from + * RandomizedContext or tests are + * running but tests.seed is not set + */ + public static Random get() { + if (currentMethod != null && getRandomMethod != null) { + try { + Object randomizedContext = currentMethod.invoke(null); + return (Random) getRandomMethod.invoke(randomizedContext); + } catch (ReflectiveOperationException e) { + // unexpected, bail + throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e); + } + } else { + return getWithoutSeed(); + } + } + + private static Random getWithoutSeed() { + assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; + return ThreadLocalRandom.current(); + } + + public static void shuffle(List<?> list) { + Collections.shuffle(list, get()); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 62f29d2bad..afcf899051 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -25,26 +25,17 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; - import java.io.IOException; -import java.util.Locale; import java.util.Objects; public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; - public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); - - protected Coordinate topLeft; - protected Coordinate bottomRight; - public EnvelopeBuilder() { - this(Orientation.RIGHT); - } + public static final 
EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); - public EnvelopeBuilder(Orientation orientation) { - super(orientation); - } + private Coordinate topLeft; + private Coordinate bottomRight; public EnvelopeBuilder topLeft(Coordinate topLeft) { this.topLeft = topLeft; @@ -55,6 +46,10 @@ public class EnvelopeBuilder extends ShapeBuilder { return topLeft(coordinate(longitude, latitude)); } + public Coordinate topLeft() { + return this.topLeft; + } + public EnvelopeBuilder bottomRight(Coordinate bottomRight) { this.bottomRight = bottomRight; return this; @@ -64,11 +59,14 @@ public class EnvelopeBuilder extends ShapeBuilder { return bottomRight(coordinate(longitude, latitude)); } + public Coordinate bottomRight() { + return this.bottomRight; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); - builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); toXContent(builder, topLeft); toXContent(builder, bottomRight); @@ -88,7 +86,7 @@ public class EnvelopeBuilder extends ShapeBuilder { @Override public int hashCode() { - return Objects.hash(orientation, topLeft, bottomRight); + return Objects.hash(topLeft, bottomRight); } @Override @@ -100,22 +98,19 @@ public class EnvelopeBuilder extends ShapeBuilder { return false; } EnvelopeBuilder other = (EnvelopeBuilder) obj; - return Objects.equals(orientation, other.orientation) && - Objects.equals(topLeft, other.topLeft) && + return Objects.equals(topLeft, other.topLeft) && Objects.equals(bottomRight, other.bottomRight); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(orientation == Orientation.RIGHT); writeCoordinateTo(topLeft, out); writeCoordinateTo(bottomRight, out); } @Override public EnvelopeBuilder readFrom(StreamInput in) throws IOException { - Orientation orientation = in.readBoolean() ? 
Orientation.RIGHT : Orientation.LEFT; - return new EnvelopeBuilder(orientation) + return new EnvelopeBuilder() .topLeft(readCoordinateFrom(in)) .bottomRight(readCoordinateFrom(in)); } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index 45397ed962..067cd014c0 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -20,27 +20,25 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Shape; + import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; public class GeometryCollectionBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION; - protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>(); - - public GeometryCollectionBuilder() { - this(Orientation.RIGHT); - } + public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder(); - public GeometryCollectionBuilder(Orientation orientation) { - super(orientation); - } + protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>(); public GeometryCollectionBuilder shape(ShapeBuilder shape) { this.shapes.add(shape); @@ -132,4 +130,39 @@ public class GeometryCollectionBuilder extends ShapeBuilder { //note: ShapeCollection is probably faster than a Multi* geom. 
} + @Override + public int hashCode() { + return Objects.hash(shapes); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj; + return Objects.equals(shapes, other.shapes); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shapes.size()); + for (ShapeBuilder shape : shapes) { + out.writeShape(shape); + } + } + + @Override + public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException { + GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder(); + int shapes = in.readVInt(); + for (int i = 0; i < shapes; i++) { + geometryCollectionBuilder.shape(in.readShape()); + } + return geometryCollectionBuilder; + } + } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index c7ba9b72f5..464d72c8d8 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Objects; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; + import com.spatial4j.core.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; @@ -34,6 +38,8 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> { public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; + public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(); + @Override 
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -139,4 +145,39 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> { } return coordinates; } + + @Override + public int hashCode() { + return Objects.hash(points); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + LineStringBuilder other = (LineStringBuilder) obj; + return Objects.equals(points, other.points); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(points.size()); + for (Coordinate point : points) { + writeCoordinateTo(point, out); + } + } + + @Override + public LineStringBuilder readFrom(StreamInput in) throws IOException { + LineStringBuilder lineStringBuilder = new LineStringBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + lineStringBuilder.point(readCoordinateFrom(in)); + } + return lineStringBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index a004b90a2d..4703ac19b0 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.geo.builders; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import com.spatial4j.core.shape.Shape; @@ -29,11 +31,14 @@ import com.vividsolutions.jts.geom.LineString; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; +import java.util.Objects; public class MultiLineStringBuilder extends ShapeBuilder { public static 
final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; + public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder(); + private final ArrayList<LineStringBuilder> lines = new ArrayList<>(); public MultiLineStringBuilder linestring(LineStringBuilder line) { @@ -41,6 +46,10 @@ public class MultiLineStringBuilder extends ShapeBuilder { return this; } + public MultiLineStringBuilder linestring(Coordinate[] coordinates) { + return this.linestring(new LineStringBuilder().points(coordinates)); + } + public Coordinate[][] coordinates() { Coordinate[][] result = new Coordinate[lines.size()][]; for (int i = 0; i < result.length; i++) { @@ -92,4 +101,39 @@ public class MultiLineStringBuilder extends ShapeBuilder { } return jtsGeometry(geometry); } + + @Override + public int hashCode() { + return Objects.hash(lines); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiLineStringBuilder other = (MultiLineStringBuilder) obj; + return Objects.equals(lines, other.lines); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(lines.size()); + for (LineStringBuilder line : lines) { + line.writeTo(out); + } + } + + @Override + public MultiLineStringBuilder readFrom(StreamInput in) throws IOException { + MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in)); + } + return multiLineStringBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 8d5cfabdab..a4d236e355 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ 
b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -22,18 +22,22 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Point; import com.spatial4j.core.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; + import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; - import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; public class MultiPointBuilder extends PointCollection<MultiPointBuilder> { - public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; + public final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(); + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -52,7 +56,7 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> { for (Coordinate coord : points) { shapes.add(SPATIAL_CONTEXT.makePoint(coord.x, coord.y)); } - XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); + XShapeCollection<Point> multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); multiPoints.setPointsOnly(true); return multiPoints; } @@ -61,4 +65,39 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> { public GeoShapeType type() { return TYPE; } + + @Override + public int hashCode() { + return Objects.hash(points); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiPointBuilder other = (MultiPointBuilder) obj; + return Objects.equals(points, other.points); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(points.size()); + for (Coordinate point : points) { + 
writeCoordinateTo(point, out); + } + } + + @Override + public MultiPointBuilder readFrom(StreamInput in) throws IOException { + MultiPointBuilder multiPointBuilder = new MultiPointBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + multiPointBuilder.point(readCoordinateFrom(in)); + } + return multiPointBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index e7762e51b6..2f9d595c9c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; +import java.util.Objects; import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import com.spatial4j.core.shape.Shape; @@ -32,26 +36,50 @@ import com.vividsolutions.jts.geom.Coordinate; public class MultiPolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; + public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder(); - protected final ArrayList<PolygonBuilder> polygons = new ArrayList<>(); + private final ArrayList<PolygonBuilder> polygons = new ArrayList<>(); + + private Orientation orientation = Orientation.RIGHT; public MultiPolygonBuilder() { this(Orientation.RIGHT); } public MultiPolygonBuilder(Orientation orientation) { - super(orientation); + this.orientation = orientation; + } + + public Orientation orientation() { + return this.orientation; } + /** + * Add a shallow copy of the polygon to the multipolygon. 
This will apply the orientation of the + * {@link MultiPolygonBuilder} to the polygon if polygon has different orientation. + */ public MultiPolygonBuilder polygon(PolygonBuilder polygon) { - this.polygons.add(polygon); + PolygonBuilder pb = new PolygonBuilder(this.orientation); + pb.points(polygon.shell().coordinates(false)); + for (LineStringBuilder hole : polygon.holes()) { + pb.hole(hole); + } + this.polygons.add(pb); return this; } + /** + * get the list of polygons + */ + public ArrayList<PolygonBuilder> polygons() { + return polygons; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); for(PolygonBuilder polygon : polygons) { builder.startArray(); @@ -89,4 +117,41 @@ public class MultiPolygonBuilder extends ShapeBuilder { return new XShapeCollection<>(shapes, SPATIAL_CONTEXT); //note: ShapeCollection is probably faster than a Multi* geom. 
} + + @Override + public int hashCode() { + return Objects.hash(polygons, orientation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiPolygonBuilder other = (MultiPolygonBuilder) obj; + return Objects.equals(polygons, other.polygons) && + Objects.equals(orientation, other.orientation); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + orientation.writeTo(out); + out.writeVInt(polygons.size()); + for (PolygonBuilder polygon : polygons) { + polygon.writeTo(out); + } + } + + @Override + public MultiPolygonBuilder readFrom(StreamInput in) throws IOException { + MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in)); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in)); + } + return polyBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index d6d62c28b8..3522546165 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -32,7 +32,6 @@ import com.vividsolutions.jts.geom.Coordinate; public class PointBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POINT; - public static final PointBuilder PROTOTYPE = new PointBuilder(); private Coordinate coordinate; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 04540df27e..03ff6a6b89 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -29,6 +29,8 @@ import 
com.vividsolutions.jts.geom.MultiPolygon; import com.vividsolutions.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,6 +41,9 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.List; +import java.util.Locale; +import java.util.Objects; /** * The {@link PolygonBuilder} implements the groundwork to create polygons. This contains @@ -48,6 +53,11 @@ import java.util.concurrent.atomic.AtomicBoolean; public class PolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POLYGON; + public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(); + + private static final Coordinate[][] EMPTY = new Coordinate[0][]; + + private Orientation orientation = Orientation.RIGHT; // line string defining the shell of the polygon private LineStringBuilder shell; @@ -56,7 +66,7 @@ public class PolygonBuilder extends ShapeBuilder { private final ArrayList<LineStringBuilder> holes = new ArrayList<>(); public PolygonBuilder() { - this(new ArrayList<Coordinate>(), Orientation.RIGHT); + this(Orientation.RIGHT); } public PolygonBuilder(Orientation orientation) { @@ -64,10 +74,14 @@ public class PolygonBuilder extends ShapeBuilder { } public PolygonBuilder(ArrayList<Coordinate> points, Orientation orientation) { - super(orientation); + this.orientation = orientation; this.shell = new LineStringBuilder().points(points); } + public Orientation orientation() { + return this.orientation; + } + public PolygonBuilder point(double longitude, double latitude) { shell.point(longitude, latitude); return this; @@ -104,6 +118,20 @@ public class PolygonBuilder extends ShapeBuilder { } /** + * @return the list of holes defined for this 
polygon + */ + public List<LineStringBuilder> holes() { + return this.holes; + } + + /** + * @return the list of points of the shell for this polygon + */ + public LineStringBuilder shell() { + return this.shell; + } + + /** * Close the shell of the polygon */ public PolygonBuilder close() { @@ -175,6 +203,7 @@ public class PolygonBuilder extends ShapeBuilder { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); coordinatesArray(builder, params); builder.endArray(); @@ -357,8 +386,6 @@ public class PolygonBuilder extends ShapeBuilder { return result; } - private static final Coordinate[][] EMPTY = new Coordinate[0][]; - private static Coordinate[][] holes(Edge[] holes, int numHoles) { if (numHoles == 0) { return EMPTY; @@ -663,4 +690,44 @@ public class PolygonBuilder extends ShapeBuilder { } } } + + @Override + public int hashCode() { + return Objects.hash(shell, holes, orientation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + PolygonBuilder other = (PolygonBuilder) obj; + return Objects.equals(shell, other.shell) && + Objects.equals(holes, other.holes) && + Objects.equals(orientation, other.orientation); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + orientation.writeTo(out); + shell.writeTo(out); + out.writeVInt(holes.size()); + for (LineStringBuilder hole : holes) { + hole.writeTo(out); + } + } + + @Override + public PolygonBuilder readFrom(StreamInput in) throws IOException { + PolygonBuilder polyBuilder = new PolygonBuilder(Orientation.readFrom(in)); + polyBuilder.shell = LineStringBuilder.PROTOTYPE.readFrom(in); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) 
{ + polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in)); + } + return polyBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index d8689ee737..fcd8177ac6 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -77,16 +77,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri /** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */ protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. - protected Orientation orientation = Orientation.RIGHT; - protected ShapeBuilder() { } - protected ShapeBuilder(Orientation orientation) { - this.orientation = orientation; - } - protected static Coordinate coordinate(double longitude, double latitude) { return new Coordinate(longitude, latitude); } @@ -186,22 +180,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri return new Coordinate(in.readDouble(), in.readDouble()); } - public static Orientation orientationFromString(String orientation) { - orientation = orientation.toLowerCase(Locale.ROOT); - switch (orientation) { - case "right": - case "counterclockwise": - case "ccw": - return Orientation.RIGHT; - case "left": - case "clockwise": - case "cw": - return Orientation.LEFT; - default: - throw new IllegalArgumentException("Unknown orientation [" + orientation + "]"); - } - } - protected static Coordinate shift(Coordinate coordinate, double dateline) { if (dateline == 0) { return coordinate; @@ -408,6 +386,30 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public static final Orientation COUNTER_CLOCKWISE = Orientation.RIGHT; public static final Orientation CW = Orientation.LEFT; public static final Orientation CCW = 
Orientation.RIGHT; + + public void writeTo (StreamOutput out) throws IOException { + out.writeBoolean(this == Orientation.RIGHT); + } + + public static Orientation readFrom (StreamInput in) throws IOException { + return in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; + } + + public static Orientation fromString(String orientation) { + orientation = orientation.toLowerCase(Locale.ROOT); + switch (orientation) { + case "right": + case "counterclockwise": + case "ccw": + return Orientation.RIGHT; + case "left": + case "clockwise": + case "cw": + return Orientation.LEFT; + default: + throw new IllegalArgumentException("Unknown orientation [" + orientation + "]"); + } + } } public static final String FIELD_TYPE = "type"; @@ -498,7 +500,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri radius = Distance.parseDistance(parser.text()); } else if (FIELD_ORIENTATION.equals(fieldName)) { parser.nextToken(); - requestedOrientation = orientationFromString(parser.text()); + requestedOrientation = Orientation.fromString(parser.text()); } else { parser.nextToken(); parser.skipChildren(); @@ -524,7 +526,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri case POLYGON: return parsePolygon(node, requestedOrientation, coerce); case MULTIPOLYGON: return parseMultiPolygon(node, requestedOrientation, coerce); case CIRCLE: return parseCircle(node, radius); - case ENVELOPE: return parseEnvelope(node, requestedOrientation); + case ENVELOPE: return parseEnvelope(node); case GEOMETRYCOLLECTION: return geometryCollections; default: throw new ElasticsearchParseException("shape type [{}] not included", shapeType); @@ -550,7 +552,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius); } - protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates, final Orientation orientation) { + 
protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) { // validate the coordinate array for envelope type if (coordinates.children.size() != 2) { throw new ElasticsearchParseException("invalid number of points [{}] provided for " + @@ -564,7 +566,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y)); lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y)); } - return ShapeBuilders.newEnvelope(orientation).topLeft(uL).bottomRight(lR); + return ShapeBuilders.newEnvelope().topLeft(uL).bottomRight(lR); } protected static void validateMultiPointNode(CoordinateNode coordinates) { @@ -684,8 +686,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri } XContentParser.Token token = parser.nextToken(); - GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection( (mapper == null) ? Orientation.RIGHT : mapper - .fieldType().orientation()); + GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection(); while (token != XContentParser.Token.END_ARRAY) { ShapeBuilder shapeBuilder = GeoShapeType.parse(parser); geometryCollection.shape(shapeBuilder); @@ -700,15 +701,4 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public String getWriteableName() { return type().shapeName(); } - - // NORELEASE this should be deleted as soon as all shape builders implement writable - @Override - public void writeTo(StreamOutput out) throws IOException { - } - - // NORELEASE this should be deleted as soon as all shape builders implement writable - @Override - public ShapeBuilder readFrom(StreamInput in) throws IOException { - return null; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java new file mode 100644 index 
0000000000..c66e969aa3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.common.geo.ShapesAvailability; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +/** + * Register the shape builder prototypes with the {@link NamedWriteableRegistry} + */ +public class ShapeBuilderRegistry { + + @Inject + public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, 
MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java index e294a9d6ef..61d7a9cd07 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java @@ -111,15 +111,6 @@ public class ShapeBuilders { } /** - * Create a new GeometryCollection - * - * @return a new {@link GeometryCollectionBuilder} - */ - public static GeometryCollectionBuilder newGeometryCollection(ShapeBuilder.Orientation orientation) { - return new GeometryCollectionBuilder(orientation); - } - - /** * create a new Circle * * @return a new {@link CircleBuilder} @@ -136,13 +127,4 @@ public class ShapeBuilders { public static EnvelopeBuilder newEnvelope() { return new EnvelopeBuilder(); } - - /** - * create a new rectangle - * - * @return a new {@link EnvelopeBuilder} - */ - public static EnvelopeBuilder newEnvelope(ShapeBuilder.Orientation orientation) { - return new EnvelopeBuilder(orientation); - } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 20859e2716..ffcb4201f4 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; -import 
org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -256,13 +256,13 @@ public abstract class StreamInput extends InputStream { if (length == -1) { return null; } - return new StringAndBytesText(readBytesReference(length)); + return new Text(readBytesReference(length)); } public Text readText() throws IOException { // use StringAndBytes so we can cache the string if its ever converted to it int length = readInt(); - return new StringAndBytesText(readBytesReference(length)); + return new Text(readBytesReference(length)); } @Nullable @@ -630,6 +630,13 @@ public abstract class StreamInput extends InputStream { } /** + * Reads a {@link ShapeBuilder} from the current stream + */ + public ShapeBuilder readShape() throws IOException { + return readNamedWriteable(ShapeBuilder.class); + } + + /** * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream */ public ScoreFunctionBuilder<?> readScoreFunction() throws IOException { diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 5f1e7623d2..e8997b8073 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -32,6 +32,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -619,6 +620,13 
@@ public abstract class StreamOutput extends OutputStream { } /** + * Writes a {@link ShapeBuilder} to the current stream + */ + public void writeShape(ShapeBuilder shapeBuilder) throws IOException { + writeNamedWriteable(shapeBuilder); + } + + /** * Writes a {@link ScoreFunctionBuilder} to the current stream */ public void writeScoreFunction(ScoreFunctionBuilder<?> scoreFunctionBuilder) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index 7191c96e33..4fe90aed9e 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -149,6 +149,10 @@ public final class AllTermQuery extends Query { return null; } final TermState state = termStates.get(context.ord); + if (state == null) { + // Term does not exist in this segment + return null; + } termsEnum.seekExact(term.bytes(), state); PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS); assert docs != null; diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index c1f282ac23..f7eab3da2a 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -19,21 +19,362 @@ package org.elasticsearch.common.network; +import org.elasticsearch.client.support.Headers; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.client.transport.support.TransportProxyClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import 
org.elasticsearch.common.util.ExtensionPoint; +import org.elasticsearch.http.HttpServer; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.netty.NettyHttpServerTransport; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; +import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; +import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; +import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; +import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; +import 
org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; +import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; +import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; +import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; +import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; +import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; +import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; +import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; +import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; +import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; +import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; +import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; +import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; +import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; +import 
org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; +import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; +import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; +import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; +import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; +import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; +import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; +import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; +import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; +import org.elasticsearch.rest.action.bulk.RestBulkAction; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestAliasAction; +import org.elasticsearch.rest.action.cat.RestAllocationAction; +import org.elasticsearch.rest.action.cat.RestCatAction; +import org.elasticsearch.rest.action.cat.RestFielddataAction; +import org.elasticsearch.rest.action.cat.RestHealthAction; +import org.elasticsearch.rest.action.cat.RestIndicesAction; +import 
org.elasticsearch.rest.action.cat.RestMasterAction; +import org.elasticsearch.rest.action.cat.RestNodeAttrsAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.cat.RestPluginsAction; +import org.elasticsearch.rest.action.cat.RestRepositoriesAction; +import org.elasticsearch.rest.action.cat.RestSegmentsAction; +import org.elasticsearch.rest.action.cat.RestShardsAction; +import org.elasticsearch.rest.action.cat.RestSnapshotAction; +import org.elasticsearch.rest.action.cat.RestThreadPoolAction; +import org.elasticsearch.rest.action.delete.RestDeleteAction; +import org.elasticsearch.rest.action.explain.RestExplainAction; +import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; +import org.elasticsearch.rest.action.get.RestGetAction; +import org.elasticsearch.rest.action.get.RestGetSourceAction; +import org.elasticsearch.rest.action.get.RestHeadAction; +import org.elasticsearch.rest.action.get.RestMultiGetAction; +import org.elasticsearch.rest.action.index.RestIndexAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; +import org.elasticsearch.rest.action.percolate.RestPercolateAction; +import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; +import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; +import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; +import org.elasticsearch.rest.action.search.RestClearScrollAction; +import org.elasticsearch.rest.action.search.RestMultiSearchAction; +import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.rest.action.suggest.RestSuggestAction; +import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction; +import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction; +import 
org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; +import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; +import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; +import org.elasticsearch.rest.action.update.RestUpdateAction; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.elasticsearch.transport.netty.NettyTransport; + +import java.util.Arrays; +import java.util.List; /** - * + * A module to handle registering and binding all network related classes. */ public class NetworkModule extends AbstractModule { + public static final String TRANSPORT_TYPE_KEY = "transport.type"; + public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; + + public static final String LOCAL_TRANSPORT = "local"; + public static final String NETTY_TRANSPORT = "netty"; + + public static final String HTTP_TYPE_KEY = "http.type"; + public static final String HTTP_ENABLED = "http.enabled"; + + private static final List<Class<? 
extends RestHandler>> builtinRestHandlers = Arrays.asList( + RestMainAction.class, + + RestNodesInfoAction.class, + RestNodesStatsAction.class, + RestNodesHotThreadsAction.class, + RestClusterStatsAction.class, + RestClusterStateAction.class, + RestClusterHealthAction.class, + RestClusterUpdateSettingsAction.class, + RestClusterGetSettingsAction.class, + RestClusterRerouteAction.class, + RestClusterSearchShardsAction.class, + RestPendingClusterTasksAction.class, + RestPutRepositoryAction.class, + RestGetRepositoriesAction.class, + RestDeleteRepositoryAction.class, + RestVerifyRepositoryAction.class, + RestGetSnapshotsAction.class, + RestCreateSnapshotAction.class, + RestRestoreSnapshotAction.class, + RestDeleteSnapshotAction.class, + RestSnapshotsStatusAction.class, + + RestIndicesExistsAction.class, + RestTypesExistsAction.class, + RestGetIndicesAction.class, + RestIndicesStatsAction.class, + RestIndicesSegmentsAction.class, + RestIndicesShardStoresAction.class, + RestGetAliasesAction.class, + RestAliasesExistAction.class, + RestIndexDeleteAliasesAction.class, + RestIndexPutAliasAction.class, + RestIndicesAliasesAction.class, + RestGetIndicesAliasesAction.class, + RestCreateIndexAction.class, + RestDeleteIndexAction.class, + RestCloseIndexAction.class, + RestOpenIndexAction.class, + + RestUpdateSettingsAction.class, + RestGetSettingsAction.class, + + RestAnalyzeAction.class, + RestGetIndexTemplateAction.class, + RestPutIndexTemplateAction.class, + RestDeleteIndexTemplateAction.class, + RestHeadIndexTemplateAction.class, + + RestPutWarmerAction.class, + RestDeleteWarmerAction.class, + RestGetWarmerAction.class, + + RestPutMappingAction.class, + RestGetMappingAction.class, + RestGetFieldMappingAction.class, + + RestRefreshAction.class, + RestFlushAction.class, + RestSyncedFlushAction.class, + RestForceMergeAction.class, + RestUpgradeAction.class, + RestClearIndicesCacheAction.class, + + RestIndexAction.class, + RestGetAction.class, + RestGetSourceAction.class, + 
RestHeadAction.class, + RestMultiGetAction.class, + RestDeleteAction.class, + org.elasticsearch.rest.action.count.RestCountAction.class, + RestSuggestAction.class, + RestTermVectorsAction.class, + RestMultiTermVectorsAction.class, + RestBulkAction.class, + RestUpdateAction.class, + RestPercolateAction.class, + RestMultiPercolateAction.class, + + RestSearchAction.class, + RestSearchScrollAction.class, + RestClearScrollAction.class, + RestMultiSearchAction.class, + RestRenderSearchTemplateAction.class, + + RestValidateQueryAction.class, + + RestExplainAction.class, + + RestRecoveryAction.class, + + // Templates API + RestGetSearchTemplateAction.class, + RestPutSearchTemplateAction.class, + RestDeleteSearchTemplateAction.class, + + // Scripts API + RestGetIndexedScriptAction.class, + RestPutIndexedScriptAction.class, + RestDeleteIndexedScriptAction.class, + + RestFieldStatsAction.class, + + // no abstract cat action + RestCatAction.class + ); + + private static final List<Class<? extends AbstractCatAction>> builtinCatHandlers = Arrays.asList( + RestAllocationAction.class, + RestShardsAction.class, + RestMasterAction.class, + RestNodesAction.class, + RestIndicesAction.class, + RestSegmentsAction.class, + // Fully qualified to prevent interference with rest.action.count.RestCountAction + org.elasticsearch.rest.action.cat.RestCountAction.class, + // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction + org.elasticsearch.rest.action.cat.RestRecoveryAction.class, + RestHealthAction.class, + org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class, + RestAliasAction.class, + RestThreadPoolAction.class, + RestPluginsAction.class, + RestFielddataAction.class, + RestNodeAttrsAction.class, + RestRepositoriesAction.class, + RestSnapshotAction.class + ); + private final NetworkService networkService; + private final Settings settings; + private final boolean transportClient; - public NetworkModule(NetworkService networkService) { + 
private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class); + private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class); + private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class); + private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class); + // we must separate the cat rest handlers so RestCatAction can collect them... + private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class); + + /** + * Creates a network module that custom networking classes can be plugged into. + * + * @param networkService A constructed network service object to bind. + * @param settings The settings for the node + * @param transportClient True if only transport classes should be allowed to be registered, false otherwise. + */ + public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) { this.networkService = networkService; + this.settings = settings; + this.transportClient = transportClient; + registerTransportService(NETTY_TRANSPORT, TransportService.class); + registerTransport(LOCAL_TRANSPORT, LocalTransport.class); + registerTransport(NETTY_TRANSPORT, NettyTransport.class); + + if (transportClient == false) { + registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class); + + for (Class<? extends AbstractCatAction> catAction : builtinCatHandlers) { + catHandlers.registerExtension(catAction); + } + for (Class<? 
extends RestHandler> restAction : builtinRestHandlers) { + restHandlers.registerExtension(restAction); + } + } + } + + /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */ + public void registerTransportService(String name, Class<? extends TransportService> clazz) { + transportServiceTypes.registerExtension(name, clazz); + } + + /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ + public void registerTransport(String name, Class<? extends Transport> clazz) { + transportTypes.registerExtension(name, clazz); + } + + /** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */ + // TODO: we need another name than "http transport"....so confusing with transportClient... + public void registerHttpTransport(String name, Class<? extends HttpServerTransport> clazz) { + if (transportClient) { + throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client"); + } + httpTransportTypes.registerExtension(name, clazz); + } + + /** Adds an additional rest action. */ + // TODO: change this further to eliminate the middle man, ie RestController, and just register method and path here + public void registerRestHandler(Class<? 
extends RestHandler> clazz) { + if (transportClient) { + throw new IllegalArgumentException("Cannot register rest handler " + clazz.getName() + " for transport client"); + } + if (AbstractCatAction.class.isAssignableFrom(clazz)) { + catHandlers.registerExtension(clazz.asSubclass(AbstractCatAction.class)); + } else { + restHandlers.registerExtension(clazz); + } } @Override protected void configure() { bind(NetworkService.class).toInstance(networkService); + bind(NamedWriteableRegistry.class).asEagerSingleton(); + + transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT); + String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT; + transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport); + + if (transportClient) { + bind(Headers.class).asEagerSingleton(); + bind(TransportProxyClient.class).asEagerSingleton(); + bind(TransportClientNodesService.class).asEagerSingleton(); + } else { + if (settings.getAsBoolean(HTTP_ENABLED, true)) { + bind(HttpServer.class).asEagerSingleton(); + httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT); + } + bind(RestController.class).asEagerSingleton(); + catHandlers.bind(binder()); + restHandlers.bind(binder()); + } } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java new file mode 100644 index 0000000000..13743cabcf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.settings;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Consumer;

/**
 * A basic setting service that can be used for per-index and per-cluster settings.
 * This service offers transactional application of updated settings: either all
 * registered consumers accept the new values, or none of them are notified.
 */
public abstract class AbstractScopedSettings extends AbstractComponent {

    /** The last settings object successfully applied via {@link #applySettings(Settings)}. */
    private Settings lastSettingsApplied = Settings.EMPTY;
    private final List<SettingUpdater> settingUpdaters = new ArrayList<>();
    // settings matched by a pattern (e.g. group settings) rather than by an exact key
    private final Map<String, Setting<?>> complexMatchers = new HashMap<>();
    // settings matched by their exact key
    private final Map<String, Setting<?>> keySettings = new HashMap<>();
    private final Setting.Scope scope;

    /**
     * @param settings    the node level settings
     * @param settingsSet the set of registered settings; every setting must carry the given scope
     * @param scope       the scope (cluster or index) this service manages
     * @throws IllegalArgumentException if any setting in {@code settingsSet} has a different scope
     */
    protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
        super(settings);
        for (Setting<?> entry : settingsSet) {
            if (entry.getScope() != scope) {
                // report the scope this service expects instead of hard-coding "cluster"
                throw new IllegalArgumentException(
                        "Setting [" + entry.getKey() + "] must be a " + scope + " setting but was: " + entry.getScope());
            }
            if (entry.hasComplexMatcher()) {
                complexMatchers.put(entry.getKey(), entry);
            } else {
                keySettings.put(entry.getKey(), entry);
            }
        }
        this.scope = scope;
    }

    /**
     * Returns the scope (cluster or index) this service manages.
     */
    public Setting.Scope getScope() {
        return this.scope;
    }

    /**
     * Applies the given settings to all listeners and rolls back the result after application. This
     * method will not change any settings but will fail if any of the settings can't be applied.
     *
     * @return the given settings merged with the node settings
     * @throws RuntimeException if one or more settings can't be applied; all failures are recorded as suppressed exceptions
     */
    public synchronized Settings dryRun(Settings settings) {
        final Settings current = Settings.builder().put(this.settings).put(settings).build();
        final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
        List<RuntimeException> exceptions = new ArrayList<>();
        for (SettingUpdater settingUpdater : settingUpdaters) {
            try {
                if (settingUpdater.hasChanged(current, previous)) {
                    // getValue validates without applying
                    settingUpdater.getValue(current, previous);
                }
            } catch (RuntimeException ex) {
                exceptions.add(ex);
                logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater);
            }
        }
        // here we are exhaustive and record all settings that failed.
        ExceptionsHelper.rethrowAndSuppress(exceptions);
        return current;
    }

    /**
     * Applies the given settings to all the settings consumers or to none of them. The settings
     * will be merged with the node settings before they are applied while given settings override existing node
     * settings.
     *
     * @param newSettings the settings to apply
     * @return the unmerged applied settings
     */
    public synchronized Settings applySettings(Settings newSettings) {
        if (lastSettingsApplied != null && newSettings.equals(lastSettingsApplied)) {
            // nothing changed in the settings, ignore
            return newSettings;
        }
        final Settings current = Settings.builder().put(this.settings).put(newSettings).build();
        final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
        try {
            // two-phase: first validate and prepare every update, then run them all,
            // so consumers are either all notified or none of them are
            List<Runnable> applyRunnables = new ArrayList<>();
            for (SettingUpdater settingUpdater : settingUpdaters) {
                try {
                    applyRunnables.add(settingUpdater.updater(current, previous));
                } catch (Exception ex) {
                    logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater);
                    throw ex;
                }
            }
            for (Runnable settingUpdater : applyRunnables) {
                settingUpdater.run();
            }
        } catch (Exception ex) {
            logger.warn("failed to apply settings", ex);
            throw ex;
        }
        return lastSettingsApplied = newSettings;
    }

    /**
     * Adds a settings consumer with an additional validator that is only evaluated at update time.
     * <p>
     * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
     * </p>
     * @param setting   the registered setting to watch; must be the identical instance that was registered
     * @param consumer  notified with the parsed value whenever the setting changes
     * @param validator an additional validator that is only applied to updates of this setting.
     *                  This is useful to add additional validation to settings at runtime compared to at startup time.
     * @throws IllegalArgumentException if {@code setting} is not the registered instance for its key
     */
    public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) {
        if (setting != get(setting.getKey())) {
            throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]");
        }
        this.settingUpdaters.add(setting.newUpdater(consumer, logger, validator));
    }

    /**
     * Adds a settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change.
     * <p>
     * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
     * </p>
     * This method registers a compound updater that is useful if two settings are depending on each other. The consumer is always provided
     * with both values even if only one of the two changes.
     *
     * @throws IllegalArgumentException if either setting is not the registered instance for its key
     */
    public synchronized <A, B> void addSettingsUpdateConsumer(Setting<A> a, Setting<B> b, BiConsumer<A, B> consumer) {
        if (a != get(a.getKey())) {
            throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]");
        }
        if (b != get(b.getKey())) {
            throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]");
        }
        this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger));
    }

    /**
     * Adds a settings consumer without an additional validator.
     * <p>
     * Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
     * </p>
     */
    public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
        addSettingsUpdateConsumer(setting, consumer, (s) -> {});
    }

    /**
     * Transactional interface to update settings.
     * @see Setting
     */
    public interface SettingUpdater<T> {

        /**
         * Returns true if this updaters setting has changed with the current update
         * @param current the current settings
         * @param previous the previous setting
         * @return true if this updaters setting has changed with the current update
         */
        boolean hasChanged(Settings current, Settings previous);

        /**
         * Returns the instance value for the current settings. This method is stateless and idempotent.
         * This method will throw an exception if the source of this value is invalid.
         */
        T getValue(Settings current, Settings previous);

        /**
         * Applies the given value to the updater. This methods will actually run the update.
         */
        void apply(T value, Settings current, Settings previous);

        /**
         * Updates this updaters value if it has changed.
         * @return <code>true</code> iff the value has been updated.
         */
        default boolean apply(Settings current, Settings previous) {
            if (hasChanged(current, previous)) {
                T value = getValue(current, previous);
                apply(value, current, previous);
                return true;
            }
            return false;
        }

        /**
         * Returns a callable runnable that calls {@link #apply(Object, Settings, Settings)} if the settings
         * actually changed. This allows to defer the update to a later point in time while keeping type safety.
         * If the value didn't change the returned runnable is a noop.
         */
        default Runnable updater(Settings current, Settings previous) {
            if (hasChanged(current, previous)) {
                T value = getValue(current, previous);
                return () -> { apply(value, current, previous); };
            }
            return () -> {};
        }
    }

    /**
     * Returns the {@link Setting} for the given key or <code>null</code> if the setting can not be found.
     */
    public Setting<?> get(String key) {
        Setting<?> setting = keySettings.get(key);
        if (setting != null) {
            return setting;
        }
        // fall back to pattern-matched settings (e.g. group settings)
        for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
            if (entry.getValue().match(key)) {
                return entry.getValue();
            }
        }
        return null;
    }

    /**
     * Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
     */
    public boolean hasDynamicSetting(String key) {
        final Setting<?> setting = get(key);
        return setting != null && setting.isDynamic();
    }

    /**
     * Returns a settings object that contains all settings that are not
     * already set in the given source. The diff contains either the default value for each
     * setting or the settings value in the given default settings.
     */
    public Settings diff(Settings source, Settings defaultSettings) {
        Settings.Builder builder = Settings.builder();
        for (Setting<?> setting : keySettings.values()) {
            if (setting.exists(source) == false) {
                builder.put(setting.getKey(), setting.getRaw(defaultSettings));
            }
        }
        return builder.build();
    }

}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.*; +import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.index.store.IndexStoreConfig; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.indices.ttl.IndicesTTLService; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +import java.util.*; + +/** + * Encapsulates all valid cluster level settings. 
+ */ +public final class ClusterSettings extends AbstractScopedSettings { + + public ClusterSettings(Settings settings, Set<Setting<?>> settingsSet) { + super(settings, settingsSet, Setting.Scope.CLUSTER); + } + + + @Override + public synchronized Settings applySettings(Settings newSettings) { + Settings settings = super.applySettings(newSettings); + try { + for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith("logger.")) { + String component = entry.getKey().substring("logger.".length()); + if ("_root".equals(component)) { + ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); + } else { + ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); + } + } + } + } catch (Exception e) { + logger.warn("failed to refresh settings for [{}]", e, "logger"); + } + return settings; + } + + /** + * Returns <code>true</code> if the settings is a logger setting. + */ + public boolean isLoggerSetting(String key) { + return key.startsWith("logger."); + } + + + public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.THRESHOLD_SETTING, + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, + ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, + ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, + 
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, + MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MetaData.SETTING_READ_ONLY_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + ThreadPool.THREADPOOL_GROUP_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, + SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, + DestructiveOperations.REQUIRES_NAME_SETTING, + DiscoverySettings.PUBLISH_TIMEOUT_SETTING, + DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, + DiscoverySettings.COMMIT_TIMEOUT_SETTING, + DiscoverySettings.NO_MASTER_BLOCK_SETTING, + 
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, + TransportService.TRACE_LOG_EXCLUDE_SETTING, + TransportService.TRACE_LOG_INCLUDE_SETTING, + TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, + InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, + Transport.TRANSPORT_PROFILES_SETTING, + Transport.TRANSPORT_TCP_COMPRESS))); +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java new file mode 100644 index 0000000000..236df5c567 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -0,0 +1,461 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.support.ToXContentToBytes; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.*; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.regex.Pattern; + +/** + */ +public class Setting<T> extends ToXContentToBytes { + private final String key; + protected final Function<Settings, String> defaultValue; + private final Function<String, T> parser; + private final boolean dynamic; + private final Scope scope; + + /** + * Creates a new Setting instance + * @param key the settings key for this setting. + * @param defaultValue a default value function that returns the default values string representation. + * @param parser a parser that parses the string rep into a complex datatype. 
+ * @param dynamic true iff this setting can be dynamically updateable + * @param scope the scope of this setting + */ + public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) { + assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; + this.key = key; + this.defaultValue = defaultValue; + this.parser = parser; + this.dynamic = dynamic; + this.scope = scope; + } + + /** + * Returns the settings key or a prefix if this setting is a group setting + * @see #isGroupSetting() + */ + public final String getKey() { + return key; + } + + /** + * Returns <code>true</code> iff this setting is dynamically updateable, otherwise <code>false</code> + */ + public final boolean isDynamic() { + return dynamic; + } + + /** + * Returns the settings scope + */ + public final Scope getScope() { + return scope; + } + + /** + * Returns <code>true</code> iff this setting is a group setting. Group settings represent a set of settings + * rather than a single value. The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like <tt>cluster.store.</tt> + * that matches all settings with this prefix. + */ + boolean isGroupSetting() { + return false; + } + + boolean hasComplexMatcher() { + return isGroupSetting(); + } + + /** + * Returns the default values string representation for this setting. + * @param settings a settings object for settings that has a default value depending on another setting if available + */ + public final String getDefault(Settings settings) { + return defaultValue.apply(settings); + } + + /** + * Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code> + */ + public final boolean exists(Settings settings) { + return settings.get(key) != null; + } + + /** + * Returns the settings value. 
If the setting is not present in the given settings object the default value is returned + * instead. + */ + public T get(Settings settings) { + String value = getRaw(settings); + try { + return parser.apply(value); + } catch (ElasticsearchParseException ex) { + throw new IllegalArgumentException(ex.getMessage(), ex); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", ex); + } catch (IllegalArgumentException ex) { + throw ex; + } catch (Exception t) { + throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t); + } + } + + /** + * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned + * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. + */ + public String getRaw(Settings settings) { + return settings.get(key, defaultValue.apply(settings)); + } + + /** + * Returns <code>true</code> iff the given key matches the settings key or if this setting is a group setting if the + * given key is part of the settings group. + * @see #isGroupSetting() + */ + public boolean match(String toTest) { + return key.equals(toTest); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("key", key); + builder.field("type", scope.name()); + builder.field("dynamic", dynamic); + builder.field("is_group_setting", isGroupSetting()); + builder.field("default", defaultValue.apply(Settings.EMPTY)); + builder.endObject(); + return builder; + } + + /** + * The settings scope - settings can either be cluster settings or per index settings. 
+ */ + public enum Scope { + CLUSTER, + INDEX; + } + + final AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger) { + return newUpdater(consumer, logger, (s) -> {}); + } + + AbstractScopedSettings.SettingUpdater newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) { + if (isDynamic()) { + return new Updater(consumer, logger, validator); + } else { + throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); + } + } + + /** + * this is used for settings that depend on each other... see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and it's + * usage for details. + */ + static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A,B> consumer, final Setting<A> aSettting, final Setting<B> bSetting, ESLogger logger) { + final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSettting.newUpdater(null, logger); + final AbstractScopedSettings.SettingUpdater<B> bSettingUpdater = bSetting.newUpdater(null, logger); + return new AbstractScopedSettings.SettingUpdater<Tuple<A, B>>() { + @Override + public boolean hasChanged(Settings current, Settings previous) { + return aSettingUpdater.hasChanged(current, previous) || bSettingUpdater.hasChanged(current, previous); + } + + @Override + public Tuple<A, B> getValue(Settings current, Settings previous) { + return new Tuple<>(aSettingUpdater.getValue(current, previous), bSettingUpdater.getValue(current, previous)); + } + + @Override + public void apply(Tuple<A, B> value, Settings current, Settings previous) { + consumer.accept(value.v1(), value.v2()); + } + + @Override + public String toString() { + return "CompoundUpdater for: " + aSettingUpdater + " and " + bSettingUpdater; + } + }; + } + + + private class Updater implements AbstractScopedSettings.SettingUpdater<T> { + private final Consumer<T> consumer; + private final ESLogger logger; 
+ private final Consumer<T> accept; + + public Updater(Consumer<T> consumer, ESLogger logger, Consumer<T> accept) { + this.consumer = consumer; + this.logger = logger; + this.accept = accept; + } + + @Override + public String toString() { + return "Updater for: " + Setting.this.toString(); + } + + @Override + public boolean hasChanged(Settings current, Settings previous) { + final String newValue = getRaw(current); + final String value = getRaw(previous); + assert isGroupSetting() == false : "group settings must override this method"; + assert value != null : "value was null but can't be unless default is null which is invalid"; + + return value.equals(newValue) == false; + } + + @Override + public T getValue(Settings current, Settings previous) { + final String newValue = getRaw(current); + final String value = getRaw(previous); + T inst = get(current); + try { + accept.accept(inst); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + newValue + "]", e); + } + return inst; + } + + @Override + public void apply(T value, Settings current, Settings previous) { + logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); + consumer.accept(value); + } + } + + + public Setting(String key, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) { + this(key, (s) -> defaultValue, parser, dynamic, scope); + } + + public static Setting<Float> floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); + } + + public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> { + float value = Float.parseFloat(s); + if (value < minValue) { + throw new IllegalArgumentException("Failed to 
parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return value; + }, dynamic, scope); + } + + public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope); + } + + public static int parseInt(String s, int minValue, String key) { + int value = Integer.parseInt(s); + if (value < minValue) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return value; + } + + public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { + return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope); + } + + public static Setting<Boolean> boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + } + + public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + } + + public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + } + + public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { + return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope); + } + + public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) { + return listSetting(key, (s) -> defaultStringValue, 
singleValueParser, dynamic, scope); + } + public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) { + Function<String, List<T>> parser = (s) -> { + try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){ + XContentParser.Token token = xContentParser.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new IllegalArgumentException("expected START_ARRAY but got " + token); + } + ArrayList<T> list = new ArrayList<>(); + while ((token = xContentParser.nextToken()) !=XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new IllegalArgumentException("expected VALUE_STRING but got " + token); + } + list.add(singleValueParser.apply(xContentParser.text())); + } + return list; + } catch (IOException e) { + throw new IllegalArgumentException("failed to parse array", e); + } + }; + return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); + @Override + public String getRaw(Settings settings) { + String[] array = settings.getAsArray(key, null); + return array == null ? 
defaultValue.apply(settings) : arrayToParsableString(array); + } + + public boolean match(String toTest) { + return pattern.matcher(toTest).matches(); + } + + @Override + boolean hasComplexMatcher() { + return true; + } + }; + } + + private static String arrayToParsableString(String[] array) { + try { + XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder.startArray(); + for (String element : array) { + builder.value(element); + } + builder.endArray(); + return builder.string(); + } catch (IOException ex) { + throw new ElasticsearchException(ex); + } + } + + + + public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) { + if (key.endsWith(".") == false) { + throw new IllegalArgumentException("key must end with a '.'"); + } + return new Setting<Settings>(key, "", (s) -> null, dynamic, scope) { + + @Override + public boolean isGroupSetting() { + return true; + } + + @Override + public Settings get(Settings settings) { + return settings.getByPrefix(key); + } + + @Override + public boolean match(String toTest) { + return Regex.simpleMatch(key + "*", toTest); + } + + @Override + public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, ESLogger logger, Consumer<Settings> validator) { + if (isDynamic() == false) { + throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); + } + final Setting<?> setting = this; + return new AbstractScopedSettings.SettingUpdater<Settings>() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + Settings currentSettings = get(current); + Settings previousSettings = get(previous); + return currentSettings.equals(previousSettings) == false; + } + + @Override + public Settings getValue(Settings current, Settings previous) { + Settings currentSettings = get(current); + Settings previousSettings = get(previous); + try { + validator.accept(currentSettings); + } catch (Exception | 
AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + previousSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e); + } + return currentSettings; + } + + @Override + public void apply(Settings value, Settings current, Settings previous) { + consumer.accept(value); + } + + @Override + public String toString() { + return "Updater for: " + setting.toString(); + } + }; + } + }; + } + + public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, defaultValue, (s) -> { + TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); + if (timeValue.millis() < minValue.millis()) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return timeValue; + }, dynamic, scope); + } + + public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { + return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope); + } + + public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope); + } + + public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> { + final double d = Double.parseDouble(s); + if (d < minValue) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return d; + }, dynamic, scope); + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java 
b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 5e083a9e74..989b05d4bf 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -597,6 +597,8 @@ public final class Settings implements ToXContent { return result.toArray(new String[result.size()]); } + + /** * Returns group settings for the given setting prefix. */ @@ -614,6 +616,9 @@ public final class Settings implements ToXContent { if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { settingPrefix = settingPrefix + "."; } + return getGroupsInternal(settingPrefix, ignoreNonGrouped); + } + private Map<String, Settings> getGroupsInternal(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { // we don't really care that it might happen twice Map<String, Map<String, String>> map = new LinkedHashMap<>(); for (Object o : settings.keySet()) { @@ -643,6 +648,16 @@ public final class Settings implements ToXContent { } return Collections.unmodifiableMap(retVal); } + /** + * Returns group settings for the given setting prefix. + */ + public Map<String, Settings> getAsGroups() throws SettingsException { + return getAsGroups(false); + } + + public Map<String, Settings> getAsGroups(boolean ignoreNonGrouped) throws SettingsException { + return getGroupsInternal("", ignoreNonGrouped); + } /** * Returns a parsed version. 
@@ -706,7 +721,7 @@ public final class Settings implements ToXContent { Builder builder = new Builder(); int numberOfSettings = in.readVInt(); for (int i = 0; i < numberOfSettings; i++) { - builder.put(in.readString(), in.readString()); + builder.put(in.readString(), in.readOptionalString()); } return builder.build(); } @@ -715,7 +730,7 @@ public final class Settings implements ToXContent { out.writeVInt(settings.getAsMap().size()); for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) { out.writeString(entry.getKey()); - out.writeString(entry.getValue()); + out.writeOptionalString(entry.getValue()); } } @@ -818,6 +833,10 @@ public final class Settings implements ToXContent { return this; } + public Builder putNull(String key) { + return put(key, (String) null); + } + /** * Sets a setting with the provided setting key and class as value. * diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 2ae4799d9f..8bc8ce1b65 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -21,6 +21,10 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.AbstractModule; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + /** * A module that binds the provided settings to the {@link Settings} interface. 
* @@ -30,15 +34,36 @@ public class SettingsModule extends AbstractModule { private final Settings settings; private final SettingsFilter settingsFilter; + private final Map<String, Setting<?>> clusterDynamicSettings = new HashMap<>(); + public SettingsModule(Settings settings, SettingsFilter settingsFilter) { this.settings = settings; this.settingsFilter = settingsFilter; + for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { + registerSetting(setting); + } } @Override protected void configure() { bind(Settings.class).toInstance(settings); bind(SettingsFilter.class).toInstance(settingsFilter); + final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values())); + bind(ClusterSettings.class).toInstance(clusterSettings); } -}
\ No newline at end of file + + public void registerSetting(Setting<?> setting) { + switch (setting.getScope()) { + case CLUSTER: + if (clusterDynamicSettings.containsKey(setting.getKey())) { + throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); + } + clusterDynamicSettings.put(setting.getKey(), setting); + break; + case INDEX: + throw new UnsupportedOperationException("not yet implemented"); + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java index 725c7e5694..9c2f973b96 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java @@ -103,9 +103,9 @@ public abstract class XContentSettingsLoader implements SettingsLoader { } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { - // ignore this + serializeValue(settings, sb, path, parser, currentFieldName, true); } else { - serializeValue(settings, sb, path, parser, currentFieldName); + serializeValue(settings, sb, path, parser, currentFieldName, false); } } @@ -126,31 +126,33 @@ public abstract class XContentSettingsLoader implements SettingsLoader { } else if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { + serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), true); // ignore } else { - serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++)); + serializeValue(settings, sb, path, parser, fieldName + '.' 
+ (counter++), false); } } } - private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName) throws IOException { + private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName, boolean isNull) throws IOException { sb.setLength(0); for (String pathEle : path) { sb.append(pathEle).append('.'); } sb.append(fieldName); String key = sb.toString(); - String currentValue = parser.text(); - String previousValue = settings.put(key, currentValue); - if (previousValue != null) { + String currentValue = isNull ? null : parser.text(); + + if (settings.containsKey(key)) { throw new ElasticsearchParseException( "duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]", key, parser.getTokenLocation().lineNumber, parser.getTokenLocation().columnNumber, - previousValue, + settings.get(key), currentValue ); } + settings.put(key, currentValue); } } diff --git a/core/src/main/java/org/elasticsearch/common/text/BytesText.java b/core/src/main/java/org/elasticsearch/common/text/BytesText.java deleted file mode 100644 index d78055db2b..0000000000 --- a/core/src/main/java/org/elasticsearch/common/text/BytesText.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.text; - -import java.nio.charset.StandardCharsets; -import org.elasticsearch.common.bytes.BytesReference; - -/** - * A {@link BytesReference} representation of the text, will always convert on the fly to a {@link String}. - */ -public class BytesText implements Text { - - private BytesReference bytes; - private int hash; - - public BytesText(BytesReference bytes) { - this.bytes = bytes; - } - - @Override - public boolean hasBytes() { - return true; - } - - @Override - public BytesReference bytes() { - return bytes; - } - - @Override - public boolean hasString() { - return false; - } - - @Override - public String string() { - // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil - if (!bytes.hasArray()) { - bytes = bytes.toBytesArray(); - } - return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); - } - - @Override - public String toString() { - return string(); - } - - @Override - public int hashCode() { - if (hash == 0) { - hash = bytes.hashCode(); - } - return hash; - } - - @Override - public boolean equals(Object obj) { - return bytes().equals(((Text) obj).bytes()); - } - - @Override - public int compareTo(Text text) { - return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); - } -}
\ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java b/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java deleted file mode 100644 index 36bf76ce44..0000000000 --- a/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.text; - -import java.nio.charset.StandardCharsets; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; - -/** - * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if - * the other is requests, caches the other one in a local reference so no additional conversion will be needed. 
- */ -public class StringAndBytesText implements Text { - - public static final Text[] EMPTY_ARRAY = new Text[0]; - - public static Text[] convertFromStringArray(String[] strings) { - if (strings.length == 0) { - return EMPTY_ARRAY; - } - Text[] texts = new Text[strings.length]; - for (int i = 0; i < strings.length; i++) { - texts[i] = new StringAndBytesText(strings[i]); - } - return texts; - } - - private BytesReference bytes; - private String text; - private int hash; - - public StringAndBytesText(BytesReference bytes) { - this.bytes = bytes; - } - - public StringAndBytesText(String text) { - this.text = text; - } - - @Override - public boolean hasBytes() { - return bytes != null; - } - - @Override - public BytesReference bytes() { - if (bytes == null) { - bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8)); - } - return bytes; - } - - @Override - public boolean hasString() { - return text != null; - } - - @Override - public String string() { - // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil - if (text == null) { - if (!bytes.hasArray()) { - bytes = bytes.toBytesArray(); - } - text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); - } - return text; - } - - @Override - public String toString() { - return string(); - } - - @Override - public int hashCode() { - if (hash == 0) { - hash = bytes().hashCode(); - } - return hash; - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - return bytes().equals(((Text) obj).bytes()); - } - - @Override - public int compareTo(Text text) { - return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/text/StringText.java b/core/src/main/java/org/elasticsearch/common/text/StringText.java deleted file mode 100644 index 9d12096b2c..0000000000 --- 
a/core/src/main/java/org/elasticsearch/common/text/StringText.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.text; - -import java.nio.charset.StandardCharsets; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; - -/** - * A {@link String} only representation of the text. Will always convert to bytes on the fly. 
- */ -public class StringText implements Text { - - public static final Text[] EMPTY_ARRAY = new Text[0]; - - public static Text[] convertFromStringArray(String[] strings) { - if (strings.length == 0) { - return EMPTY_ARRAY; - } - Text[] texts = new Text[strings.length]; - for (int i = 0; i < strings.length; i++) { - texts[i] = new StringText(strings[i]); - } - return texts; - } - - private final String text; - private int hash; - - public StringText(String text) { - this.text = text; - } - - @Override - public boolean hasBytes() { - return false; - } - - @Override - public BytesReference bytes() { - return new BytesArray(text.getBytes(StandardCharsets.UTF_8)); - } - - @Override - public boolean hasString() { - return true; - } - - @Override - public String string() { - return text; - } - - @Override - public String toString() { - return string(); - } - - @Override - public int hashCode() { - // we use bytes here so we can be consistent with other text implementations - if (hash == 0) { - hash = bytes().hashCode(); - } - return hash; - } - - @Override - public boolean equals(Object obj) { - // we use bytes here so we can be consistent with other text implementations - return bytes().equals(((Text) obj).bytes()); - } - - @Override - public int compareTo(Text text) { - return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/text/Text.java b/core/src/main/java/org/elasticsearch/common/text/Text.java index 9fe1ea5f35..d5b02f559f 100644 --- a/core/src/main/java/org/elasticsearch/common/text/Text.java +++ b/core/src/main/java/org/elasticsearch/common/text/Text.java @@ -18,39 +18,101 @@ */ package org.elasticsearch.common.text; +import java.nio.charset.StandardCharsets; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; - /** - * Text represents a (usually) long text data. 
We use this abstraction instead of {@link String} - * so we can represent it in a more optimized manner in memory as well as serializing it over the - * network as well as converting it to json format. + * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if + * the other is requests, caches the other one in a local reference so no additional conversion will be needed. */ -public interface Text extends Comparable<Text> { +public final class Text implements Comparable<Text> { - /** - * Are bytes available without the need to be converted into bytes when calling {@link #bytes()}. - */ - boolean hasBytes(); + public static final Text[] EMPTY_ARRAY = new Text[0]; + + public static Text[] convertFromStringArray(String[] strings) { + if (strings.length == 0) { + return EMPTY_ARRAY; + } + Text[] texts = new Text[strings.length]; + for (int i = 0; i < strings.length; i++) { + texts[i] = new Text(strings[i]); + } + return texts; + } + + private BytesReference bytes; + private String text; + private int hash; + + public Text(BytesReference bytes) { + this.bytes = bytes; + } + + public Text(String text) { + this.text = text; + } /** - * The UTF8 bytes representing the the text, might be converted on the fly, see {@link #hasBytes()} + * Whether a {@link BytesReference} view of the data is already materialized. */ - BytesReference bytes(); + public boolean hasBytes() { + return bytes != null; + } /** - * Is there a {@link String} representation of the text. If not, then it {@link #hasBytes()}. + * Returns a {@link BytesReference} view of the data. */ - boolean hasString(); + public BytesReference bytes() { + if (bytes == null) { + bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8)); + } + return bytes; + } /** - * Returns the string representation of the text, might be converted to a string on the fly. + * Whether a {@link String} view of the data is already materialized. 
*/ - String string(); + public boolean hasString() { + return text != null; + } /** - * Returns the string representation of the text, might be converted to a string on the fly. + * Returns a {@link String} view of the data. */ + public String string() { + if (text == null) { + if (!bytes.hasArray()) { + bytes = bytes.toBytesArray(); + } + text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); + } + return text; + } + + @Override + public String toString() { + return string(); + } + + @Override + public int hashCode() { + if (hash == 0) { + hash = bytes().hashCode(); + } + return hash; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + return bytes().equals(((Text) obj).bytes()); + } + @Override - String toString(); + public int compareTo(Text text) { + return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); + } } diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index ee6371605e..fb44c7dc9a 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -229,6 +229,30 @@ public class TimeValue implements Streamable { return Strings.format1Decimals(value, suffix); } + public String getStringRep() { + if (duration < 0) { + return Long.toString(duration); + } + switch (timeUnit) { + case NANOSECONDS: + return Strings.format1Decimals(duration, "nanos"); + case MICROSECONDS: + return Strings.format1Decimals(duration, "micros"); + case MILLISECONDS: + return Strings.format1Decimals(duration, "ms"); + case SECONDS: + return Strings.format1Decimals(duration, "s"); + case MINUTES: + return Strings.format1Decimals(duration, "m"); + case HOURS: + return Strings.format1Decimals(duration, "h"); + case DAYS: + return Strings.format1Decimals(duration, "d"); + default: + throw 
new IllegalArgumentException("unknown time unit: " + timeUnit.name()); + } + } + public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) { settingName = Objects.requireNonNull(settingName); assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName; diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index af8e753469..d26485a121 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -53,7 +53,7 @@ import java.util.Map; */ public final class XContentBuilder implements BytesStream, Releasable { - public static enum FieldCaseConversion { + public enum FieldCaseConversion { /** * No conversion will occur. */ @@ -251,14 +251,7 @@ public final class XContentBuilder implements BytesStream, Releasable { } public XContentBuilder field(XContentBuilderString name) throws IOException { - if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { - generator.writeFieldName(name.underscore()); - } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) { - generator.writeFieldName(name.camelCase()); - } else { - generator.writeFieldName(name.underscore()); - } - return this; + return field(name, fieldCaseConversion); } public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException { @@ -273,22 +266,13 @@ public final class XContentBuilder implements BytesStream, Releasable { } public XContentBuilder field(String name) throws IOException { - if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { - if (cachedStringBuilder == null) { - cachedStringBuilder = new StringBuilder(); - } - name = Strings.toUnderscoreCase(name, cachedStringBuilder); - } else if (fieldCaseConversion == 
FieldCaseConversion.CAMELCASE) { - if (cachedStringBuilder == null) { - cachedStringBuilder = new StringBuilder(); - } - name = Strings.toCamelCase(name, cachedStringBuilder); - } - generator.writeFieldName(name); - return this; + return field(name, fieldCaseConversion); } public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException { + if (name == null) { + throw new IllegalArgumentException("field name cannot be null"); + } if (conversion == FieldCaseConversion.UNDERSCORE) { if (cachedStringBuilder == null) { cachedStringBuilder = new StringBuilder(); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index eeba9baa32..a82099658e 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout"; public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed"; - private static class InitialStateListener implements InitialStateDiscoveryListener { private final CountDownLatch latch = new CountDownLatch(1); @@ -132,10 +132,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic } public static String generateNodeId(Settings settings) { - String seed = 
settings.get(DiscoveryService.SETTING_DISCOVERY_SEED); - if (seed != null) { - return Strings.randomBase64UUID(new Random(Long.parseLong(seed))); - } - return Strings.randomBase64UUID(); + Random random = Randomness.get(settings, DiscoveryService.SETTING_DISCOVERY_SEED); + return Strings.randomBase64UUID(random); } } diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index 20f2c96b12..6689d9c868 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -23,9 +23,10 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.rest.RestStatus; import java.util.EnumSet; @@ -35,42 +36,40 @@ import java.util.EnumSet; */ public class DiscoverySettings extends AbstractComponent { + public final static int NO_MASTER_BLOCK_ID = 2; + public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); + public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); /** * sets the timeout for a complete publishing cycle, including both sending and committing. 
the master * will continute to process the next cluster state update after this time has elapsed **/ - public static final String PUBLISH_TIMEOUT = "discovery.zen.publish_timeout"; + public static final Setting<TimeValue> PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. */ - public static final String COMMIT_TIMEOUT = "discovery.zen.commit_timeout"; - public static final String NO_MASTER_BLOCK = "discovery.zen.no_master_block"; - public static final String PUBLISH_DIFF_ENABLE = "discovery.zen.publish_diff.enable"; - - public static final TimeValue DEFAULT_PUBLISH_TIMEOUT = TimeValue.timeValueSeconds(30); - public static final TimeValue DEFAULT_COMMIT_TIMEOUT = TimeValue.timeValueSeconds(30); - public static final String DEFAULT_NO_MASTER_BLOCK = "write"; - public final static int NO_MASTER_BLOCK_ID = 2; - public final static boolean DEFAULT_PUBLISH_DIFF_ENABLE = true; - - public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final Setting<TimeValue> COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER); + public static final Setting<ClusterBlock> NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", 
DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); + public static final Setting<Boolean> PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; + private volatile TimeValue commitTimeout; private volatile boolean publishDiff; @Inject - public DiscoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { + public DiscoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); - nodeSettingsService.addListener(new ApplySettings()); - this.noMasterBlock = parseNoMasterBlock(settings.get(NO_MASTER_BLOCK, DEFAULT_NO_MASTER_BLOCK)); - this.publishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, DEFAULT_PUBLISH_TIMEOUT); - this.commitTimeout = settings.getAsTime(COMMIT_TIMEOUT, new TimeValue(Math.min(DEFAULT_COMMIT_TIMEOUT.millis(), publishTimeout.millis()))); - this.publishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, DEFAULT_PUBLISH_DIFF_ENABLE); + clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff); + clusterSettings.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout); + this.noMasterBlock = NO_MASTER_BLOCK_SETTING.get(settings); + this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings); + this.commitTimeout = COMMIT_TIMEOUT_SETTING.get(settings); + this.publishDiff = PUBLISH_DIFF_ENABLE_SETTING.get(settings); } /** @@ -88,47 +87,25 @@ public class DiscoverySettings extends AbstractComponent { return noMasterBlock; } - public boolean getPublishDiff() { return publishDiff;} + private void setNoMasterBlock(ClusterBlock noMasterBlock) { + this.noMasterBlock = noMasterBlock; + } - private class 
ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue newPublishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, null); - if (newPublishTimeout != null) { - if (newPublishTimeout.millis() != publishTimeout.millis()) { - logger.info("updating [{}] from [{}] to [{}]", PUBLISH_TIMEOUT, publishTimeout, newPublishTimeout); - publishTimeout = newPublishTimeout; - if (settings.getAsTime(COMMIT_TIMEOUT, null) == null && commitTimeout.millis() > publishTimeout.millis()) { - logger.info("reducing default [{}] to [{}] due to publish timeout change", COMMIT_TIMEOUT, publishTimeout); - commitTimeout = publishTimeout; - } - } - } - TimeValue newCommitTimeout = settings.getAsTime(COMMIT_TIMEOUT, null); - if (newCommitTimeout != null) { - if (newCommitTimeout.millis() != commitTimeout.millis()) { - logger.info("updating [{}] from [{}] to [{}]", COMMIT_TIMEOUT, commitTimeout, newCommitTimeout); - commitTimeout = newCommitTimeout; - } - } - String newNoMasterBlockValue = settings.get(NO_MASTER_BLOCK); - if (newNoMasterBlockValue != null) { - ClusterBlock newNoMasterBlock = parseNoMasterBlock(newNoMasterBlockValue); - if (newNoMasterBlock != noMasterBlock) { - noMasterBlock = newNoMasterBlock; - } - } - Boolean newPublishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, null); - if (newPublishDiff != null) { - if (newPublishDiff != publishDiff) { - logger.info("updating [{}] from [{}] to [{}]", PUBLISH_DIFF_ENABLE, publishDiff, newPublishDiff); - publishDiff = newPublishDiff; - } - } - } + private void setPublishDiff(boolean publishDiff) { + this.publishDiff = publishDiff; + } + + private void setPublishTimeout(TimeValue publishTimeout) { + this.publishTimeout = publishTimeout; } - private ClusterBlock parseNoMasterBlock(String value) { + private void setCommitTimeout(TimeValue commitTimeout) { + this.commitTimeout = commitTimeout; + } + + public boolean getPublishDiff() { return publishDiff;} + + private 
static ClusterBlock parseNoMasterBlock(String value) { switch (value) { case "all": return NO_MASTER_BLOCK_ALL; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 03111d141e..8849a849f9 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -39,6 +39,8 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -55,7 +57,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -74,7 +75,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider { - public final static String SETTING_REJOIN_ON_MASTER_GONE = "discovery.zen.rejoin_on_master_gone"; + public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER); public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout"; public final static String SETTING_JOIN_TIMEOUT = 
"discovery.zen.join_timeout"; public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts"; @@ -139,7 +140,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen @Inject public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, - TransportService transportService, final ClusterService clusterService, NodeSettingsService nodeSettingsService, + TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, ElectMasterService electMasterService, DiscoverySettings discoverySettings) { super(settings); @@ -160,7 +161,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true); this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false); this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2)); - this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true); + this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings); if (this.joinRetryAttempts < 1) { throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. 
got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); @@ -171,7 +172,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> { + final ClusterState clusterState = clusterService.state(); + int masterNodes = clusterState.nodes().masterNodes().size(); + if (value > masterNodes) { + throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]"); + } + }); + clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone); this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); @@ -306,6 +314,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen return clusterJoinsCounter.get() > 0; } + private void setRejoingOnMasterGone(boolean rejoin) { + this.rejoinOnMasterGone = rejoin; + } + /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ @@ -824,8 +836,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen } } - void handleJoinRequest(final DiscoveryNode node, final MembershipAction.JoinCallback callback) { - + void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) { if (!transportService.addressSupported(node.address().getClass())) { // TODO, what should we do now? 
Maybe inform that node that its crap? logger.warn("received a wrong address type from [{}], ignoring...", node); @@ -837,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen // Sanity check: maybe we don't end up here, because serialization may have failed. if (node.getVersion().before(minimumNodeJoinVersion)) { callback.onFailure( - new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") + new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") ); return; } @@ -847,7 +858,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen // validate the join request, will throw a failure if it fails, which will get back to the // node calling the join request - membership.sendValidateJoinRequestBlocking(node, joinTimeout); + try { + membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); + } catch (Throwable e) { + logger.warn("failed to validate incoming join request from node [{}]", node); + callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); + return; + } nodeJoinController.handleJoinRequest(node, callback); } } @@ -1027,7 +1044,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen private class MembershipListener implements MembershipAction.MembershipListener { @Override public void onJoin(DiscoveryNode node, MembershipAction.JoinCallback callback) { - handleJoinRequest(node, callback); + handleJoinRequest(node, clusterService.state(), callback); } @Override @@ -1139,26 +1156,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen } } - class 
ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int minimumMasterNodes = settings.getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, - ZenDiscovery.this.electMaster.minimumMasterNodes()); - if (minimumMasterNodes != ZenDiscovery.this.electMaster.minimumMasterNodes()) { - logger.info("updating {} from [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, - ZenDiscovery.this.electMaster.minimumMasterNodes(), minimumMasterNodes); - handleMinimumMasterNodesChanged(minimumMasterNodes); - } - - boolean rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone); - if (rejoinOnMasterGone != ZenDiscovery.this.rejoinOnMasterGone) { - logger.info("updating {} from [{}] to [{}]", SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone, rejoinOnMasterGone); - ZenDiscovery.this.rejoinOnMasterGone = rejoinOnMasterGone; - } - } - } - - /** * All control of the join thread should happen under the cluster state update task thread. 
* This is important to make sure that the background joining process is always in sync with any cluster state updates diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java index 9164a85388..9cca1edfc5 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java @@ -22,11 +22,10 @@ package org.elasticsearch.discovery.zen.elect; import com.carrotsearch.hppc.ObjectContainer; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -41,23 +40,7 @@ import java.util.List; */ public class ElectMasterService extends AbstractComponent { - public static final String DISCOVERY_ZEN_MINIMUM_MASTER_NODES = "discovery.zen.minimum_master_nodes"; - public static final Validator DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR = new Validator() { - @Override - public String validate(String setting, String value, ClusterState clusterState) { - int intValue; - try { - intValue = Integer.parseInt(value); - } catch (NumberFormatException ex) { - return "cannot parse value [" + value + "] as an integer"; - } - int masterNodes = clusterState.nodes().masterNodes().size(); - if (intValue > masterNodes) { - return "cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES + " to more than the current master nodes count [" + masterNodes + "]"; - } - return null; - } - }; + public static final Setting<Integer> 
DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER); // This is the minimum version a master needs to be on, otherwise it gets ignored // This is based on the minimum compatible version of the current version this node is on @@ -70,7 +53,7 @@ public class ElectMasterService extends AbstractComponent { public ElectMasterService(Settings settings, Version version) { super(settings); this.minMasterVersion = version.minimumCompatibilityVersion(); - this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1); + this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index 4260b992dd..5a96addc84 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen.membership; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; @@ -88,10 +89,6 @@ public class MembershipAction extends AbstractComponent { transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } - public void sendJoinRequest(DiscoveryNode masterNode, DiscoveryNode node) { - transportService.sendRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME); - } 
- public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); @@ -100,8 +97,8 @@ public class MembershipAction extends AbstractComponent { /** * Validates the join request, throwing a failure if it failed. */ - public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) { - transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(), EmptyTransportResponseHandler.INSTANCE_SAME) + public void sendValidateJoinRequestBlocking(DiscoveryNode node, ClusterState state, TimeValue timeout) { + transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(state), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -156,9 +153,26 @@ public class MembershipAction extends AbstractComponent { } } - public static class ValidateJoinRequest extends TransportRequest { + class ValidateJoinRequest extends TransportRequest { + private ClusterState state; + + ValidateJoinRequest() { + } + + ValidateJoinRequest(ClusterState state) { + this.state = state; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.state = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + } - public ValidateJoinRequest() { + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.state.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 3a1b430f98..93e95dfaa9 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -21,7 +21,12 
@@ package org.elasticsearch.env; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.store.*; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.store.NativeFSLockFactory; +import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,6 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -38,11 +44,25 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; +import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.Closeable; import java.io.IOException; -import java.nio.file.*; -import java.util.*; +import java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.DirectoryStream; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -145,7 +165,7 @@ public class 
NodeEnvironment extends AbstractComponent implements Closeable { for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) { Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId)); Files.createDirectories(dir); - + try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); try { @@ -187,6 +207,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } maybeLogPathDetails(); + maybeLogHeapDetails(); if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) { SegmentInfos.setInfoStream(System.out); @@ -274,6 +295,13 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } } + private void maybeLogHeapDetails() { + JvmInfo jvmInfo = JvmInfo.jvmInfo(); + ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax(); + String useCompressedOops = jvmInfo.useCompressedOops(); + logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops); + } + private static String toString(Collection<String> items) { StringBuilder b = new StringBuilder(); for(String item : items) { @@ -811,7 +839,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { // Sanity check: assert Integer.parseInt(shardPath.getName(count-1).toString()) >= 0; assert "indices".equals(shardPath.getName(count-3).toString()); - + return shardPath.getParent().getParent().getParent(); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index e83ec695a9..5e410fb6d5 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -227,7 +227,7 @@ public class GatewayService extends 
AbstractLifecycleComponent<GatewayService> i // automatically generate a UID for the metadata if we need to metaDataBuilder.generateClusterUuidIfNeeded(); - if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) { + if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings()) || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) { blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); } diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index e560b4458b..79bfbdac8c 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -20,6 +20,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,8 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import java.util.*; +import java.util.stream.Collectors; /** * The primary shard allocator allocates primary shard that were not created as @@ -39,6 +42,7 @@ import java.util.*; */ public abstract class PrimaryShardAllocator extends AbstractComponent { + @Deprecated public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards"; private final String initialShards; @@ -56,13 +60,21 @@ public abstract class PrimaryShardAllocator extends 
AbstractComponent { final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { - ShardRouting shard = unassignedIterator.next(); + final ShardRouting shard = unassignedIterator.next(); - if (needToFindPrimaryCopy(shard) == false) { + if (shard.primary() == false) { continue; } - AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation); + final IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList()); + + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { + // when we create a fresh index + continue; + } + + final AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation); if (shardState.hasData() == false) { logger.trace("{}: ignoring allocation, still fetching shard started state", shard); allocation.setHasPendingAsyncFetch(); @@ -70,25 +82,50 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { continue; } - IndexMetaData indexMetaData = metaData.index(shard.getIndex()); - Settings indexSettings = Settings.builder().put(settings).put(indexMetaData.getSettings()).build(); - - NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexSettings), allocation.getIgnoreNodes(shard.shardId()), shardState); - logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); + final Set<String> lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id()); + final boolean snapshotRestore = shard.restoreSource() != null; + final boolean recoverOnAnyNode = recoverOnAnyNode(indexSettings); + + final NodesAndVersions nodesAndVersions; + final 
boolean enoughAllocationsFound; + + if (lastActiveAllocationIds.isEmpty()) { + assert indexSettings.getIndexVersionCreated().before(Version.V_3_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new"; + // when we load an old index (after upgrading cluster) or restore a snapshot of an old index + // fall back to old version-based allocation mode + // Note that once the shard has been active, lastActiveAllocationIds will be non-empty + nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState); + if (snapshotRestore || recoverOnAnyNode) { + enoughAllocationsFound = nodesAndVersions.allocationsFound > 0; + } else { + enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions); + } + logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); + } else { + assert lastActiveAllocationIds.isEmpty() == false; + // use allocation ids to select nodes + nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode, + allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState); + enoughAllocationsFound = nodesAndVersions.allocationsFound > 0; + logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds); + } - if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) { - // if we are restoring this shard we still can allocate - if (shard.restoreSource() == null) { + if (enoughAllocationsFound == false){ + if (snapshotRestore) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, will restore from [{}]", 
shard.index(), shard.id(), shard.restoreSource()); + } else if (recoverOnAnyNode) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id()); + } else { // we can't really allocate, so ignore it and continue unassignedIterator.removeAndIgnore(); logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound); - } else { - logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource()); } continue; } - NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions); + final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes); if (nodesToAllocate.yesNodes.isEmpty() == false) { DiscoveryNode node = nodesToAllocate.yesNodes.get(0); logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node); @@ -109,63 +146,99 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } /** - * Does the shard need to find a primary copy? + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching + * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but + * entries with matching allocation id are always at the front of the list. 
*/ - boolean needToFindPrimaryCopy(ShardRouting shard) { - if (shard.primary() == false) { - return false; - } + protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes, + Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) { + List<DiscoveryNode> matchingNodes = new ArrayList<>(); + List<DiscoveryNode> nonMatchingNodes = new ArrayList<>(); + long highestVersion = -1; + for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + DiscoveryNode node = nodeShardState.getNode(); + String allocationId = nodeShardState.allocationId(); + + if (ignoreNodes.contains(node.id())) { + continue; + } + + if (nodeShardState.storeException() == null) { + if (allocationId == null && nodeShardState.version() != -1) { + // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard + allocationId = "_n/a_"; + } + + logger.trace("[{}] on node [{}] has allocation id [{}] of shard", shard, nodeShardState.getNode(), allocationId); + } else { + logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId); + allocationId = null; + } - // this is an API allocation, ignore since we know there is no data... 
- if (shard.allocatedPostIndexCreate() == false) { - return false; + if (allocationId != null) { + if (lastActiveAllocationIds.contains(allocationId)) { + matchingNodes.add(node); + highestVersion = Math.max(highestVersion, nodeShardState.version()); + } else if (matchAnyShard) { + nonMatchingNodes.add(node); + highestVersion = Math.max(highestVersion, nodeShardState.version()); + } + } } - return true; + List<DiscoveryNode> nodes = new ArrayList<>(); + nodes.addAll(matchingNodes); + nodes.addAll(nonMatchingNodes); + + if (logger.isTraceEnabled()) { + logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", "))); + } + return new NodesAndVersions(nodes, nodes.size(), highestVersion); } - private boolean isEnoughAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) { + /** + * used by old version-based allocation + */ + private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) { // check if the counts meets the minimum set int requiredAllocation = 1; // if we restore from a repository one copy is more then enough - if (shard.restoreSource() == null) { - try { - String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards)); - if ("quorum".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 1) { - requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1; - } - } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 2) { - requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2); - } - } else if ("one".equals(initialShards)) { - requiredAllocation = 1; - } else if ("full".equals(initialShards) || "all".equals(initialShards)) { - requiredAllocation = 
indexMetaData.getNumberOfReplicas() + 1; - } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 1) { - requiredAllocation = indexMetaData.getNumberOfReplicas(); - } - } else { - requiredAllocation = Integer.parseInt(initialShards); + try { + String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards)); + if ("quorum".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 1) { + requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1; } - } catch (Exception e) { - logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard); + } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 2) { + requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2); + } + } else if ("one".equals(initialShards)) { + requiredAllocation = 1; + } else if ("full".equals(initialShards) || "all".equals(initialShards)) { + requiredAllocation = indexMetaData.getNumberOfReplicas() + 1; + } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 1) { + requiredAllocation = indexMetaData.getNumberOfReplicas(); + } + } else { + requiredAllocation = Integer.parseInt(initialShards); } + } catch (Exception e) { + logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard); } return nodesAndVersions.allocationsFound >= requiredAllocation; } /** - * Based on the nodes and versions, build the list of yes/no/throttle nodes that the shard applies to. 
+ * Split the list of nodes to lists of yes/no/throttle nodes based on allocation deciders */ - private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, NodesAndVersions nodesAndVersions) { + private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List<DiscoveryNode> nodes) { List<DiscoveryNode> yesNodes = new ArrayList<>(); List<DiscoveryNode> throttledNodes = new ArrayList<>(); List<DiscoveryNode> noNodes = new ArrayList<>(); - for (DiscoveryNode discoNode : nodesAndVersions.nodes) { + for (DiscoveryNode discoNode : nodes) { RoutingNode node = allocation.routingNodes().node(discoNode.id()); if (node == null) { continue; @@ -184,9 +257,11 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } /** - * Builds a list of nodes and version + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version + * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest + * version are always at the front of the list. 
*/ - NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean recoveryOnAnyNode, Set<String> ignoreNodes, + NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) { final Map<DiscoveryNode, Long> nodesWithVersion = new HashMap<>(); int numberOfAllocationsFound = 0; @@ -208,20 +283,15 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { version = -1; } - if (recoveryOnAnyNode) { - numberOfAllocationsFound++; - if (version > highestVersion) { - highestVersion = version; - } - // We always put the node without clearing the map - nodesWithVersion.put(node, version); - } else if (version != -1) { + if (version != -1) { numberOfAllocationsFound++; // If we've found a new "best" candidate, clear the // current candidates and add it if (version > highestVersion) { highestVersion = version; - nodesWithVersion.clear(); + if (matchAnyShard == false) { + nodesWithVersion.clear(); + } nodesWithVersion.put(node, version); } else if (version == highestVersion) { // If the candidate is the same, add it to the @@ -258,9 +328,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { * Return {@code true} if the index is configured to allow shards to be * recovered on any node */ - private boolean recoverOnAnyNode(Settings idxSettings) { - return IndexMetaData.isOnSharedFilesystem(idxSettings) && - idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false); + private boolean recoverOnAnyNode(IndexSettings indexSettings) { + return indexSettings.isOnSharedFilesystem() + && indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false); } protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation 
allocation); diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index c87f4d9475..0b5f2bc58d 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -24,6 +24,8 @@ import com.carrotsearch.hppc.ObjectLongMap; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -56,6 +58,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { */ public boolean processExistingRecoveries(RoutingAllocation allocation) { boolean changed = false; + MetaData metaData = allocation.metaData(); for (RoutingNodes.RoutingNodesIterator nodes = allocation.routingNodes().nodes(); nodes.hasNext(); ) { nodes.next(); for (RoutingNodes.RoutingNodeIterator it = nodes.nodeShards(); it.hasNext(); ) { @@ -69,8 +72,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { if (shard.relocatingNodeId() != null) { continue; } + // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
- if (shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } @@ -114,6 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { boolean changed = false; final RoutingNodes routingNodes = allocation.routingNodes(); final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); + MetaData metaData = allocation.metaData(); while (unassignedIterator.hasNext()) { ShardRouting shard = unassignedIterator.next(); if (shard.primary()) { @@ -121,7 +127,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - if (shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index d91b4bd8cd..539ac92426 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -139,7 +139,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction Store.tryOpenIndex(shardPath.resolveIndex()); } catch (Exception exception) { logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : ""); - return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception); + String allocationId = shardStateMetaData.allocationId != null ? 
shardStateMetaData.allocationId.getId() : null; + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, exception); } } // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata @@ -149,11 +150,12 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID); } else { logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData); - return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version); + String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null; + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId); } } logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), -1); + return new NodeGatewayStartedShards(clusterService.localNode(), -1, null); } catch (Exception e) { throw new ElasticsearchException("failed to load started shards", e); } @@ -277,17 +279,19 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public static class NodeGatewayStartedShards extends BaseNodeResponse { private long version = -1; + private String allocationId = null; private Throwable storeException = null; public NodeGatewayStartedShards() { } - public NodeGatewayStartedShards(DiscoveryNode node, long version) { - this(node, version, null); + public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId) { + this(node, version, allocationId, null); } - public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) { + public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, Throwable 
storeException) { super(node); this.version = version; + this.allocationId = allocationId; this.storeException = storeException; } @@ -295,6 +299,10 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction return this.version; } + public String allocationId() { + return this.allocationId; + } + public Throwable storeException() { return this.storeException; } @@ -303,16 +311,17 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public void readFrom(StreamInput in) throws IOException { super.readFrom(in); version = in.readLong(); + allocationId = in.readOptionalString(); if (in.readBoolean()) { storeException = in.readThrowable(); } - } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(version); + out.writeOptionalString(allocationId); if (storeException != null) { out.writeBoolean(true); out.writeThrowable(storeException); diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java b/core/src/main/java/org/elasticsearch/http/HttpServerModule.java deleted file mode 100644 index 49d6736964..0000000000 --- a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.netty.NettyHttpServerTransport; - -import java.util.Objects; - -/** - * - */ -public class HttpServerModule extends AbstractModule { - - private final Settings settings; - private final ESLogger logger; - - private Class<? extends HttpServerTransport> httpServerTransportClass; - - public HttpServerModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - this.httpServerTransportClass = NettyHttpServerTransport.class; - } - - @SuppressWarnings({"unchecked"}) - @Override - protected void configure() { - bind(HttpServerTransport.class).to(httpServerTransportClass).asEagerSingleton(); - bind(HttpServer.class).asEagerSingleton(); - } - - public void setHttpServerTransport(Class<? 
extends HttpServerTransport> httpServerTransport, String source) { - Objects.requireNonNull(httpServerTransport, "Configured http server transport may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport may not be null"); - logger.info("Using [{}] as http transport, overridden by [{}]", httpServerTransportClass.getName(), source); - this.httpServerTransportClass = httpServerTransport; - } -} diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java index 10008c76a5..4bcbf4079c 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.jboss.netty.channel.*; diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java index 6b713a0802..622a3e6ac9 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.jboss.netty.channel.*; /** diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java index 7343b29b6c..cc47b5be32 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.UpstreamMessageEvent; diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 92ca00231b..a6b66742c5 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -48,7 +47,13 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.*; +import org.elasticsearch.index.shard.IndexEventListener; +import org.elasticsearch.index.shard.IndexSearcherWrapper; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShadowIndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; @@ -73,7 +78,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; /** * */ -public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard>{ 
+public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> { private final IndexEventListener eventListener; private final AnalysisService analysisService; @@ -93,7 +98,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC private final AtomicBoolean deleted = new AtomicBoolean(false); private final IndexSettings indexSettings; - @Inject public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, SimilarityService similarityService, ShardStoreDeleter shardStoreDeleter, @@ -146,7 +150,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC */ @Nullable public IndexShard getShardOrNull(int shardId) { - return shards.get(shardId); + return shards.get(shardId); } /** @@ -160,13 +164,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC return indexShard; } - public Set<Integer> shardIds() { return shards.keySet(); } + public Set<Integer> shardIds() { + return shards.keySet(); + } public IndexCache cache() { return indexCache; } - public IndexFieldDataService fieldData() { return indexFieldData; } + public IndexFieldDataService fieldData() { + return indexFieldData; + } public AnalysisService analysisService() { return this.analysisService; @@ -207,7 +215,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC private long getAvgShardSizeInBytes() throws IOException { long sum = 0; int count = 0; - for(IndexShard indexShard : this) { + for (IndexShard indexShard : this) { sum += indexShard.store().stats().sizeInBytes(); count++; } @@ -254,17 +262,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. 
for a shard // that's being relocated/replicated we know how large it will become once it's done copying: // Count up how many shards are currently on each data path: - Map<Path,Integer> dataPathToShardCount = new HashMap<>(); - for(IndexShard shard : this) { + Map<Path, Integer> dataPathToShardCount = new HashMap<>(); + for (IndexShard shard : this) { Path dataPath = shard.shardPath().getRootStatePath(); Integer curCount = dataPathToShardCount.get(dataPath); if (curCount == null) { curCount = 0; } - dataPathToShardCount.put(dataPath, curCount+1); + dataPathToShardCount.put(dataPath, curCount + 1); } path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), - dataPathToShardCount); + dataPathToShardCount); logger.debug("{} creating using a new path [{}]", shardId, path); } else { logger.debug("{} creating using an existing path [{}]", shardId, path); @@ -277,7 +285,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC logger.debug("creating shard_id {}", shardId); // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary. 
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false || - (primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); + (primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId))); if (useShadowEngine(primary, indexSettings)) { indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); @@ -462,6 +470,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } } } + /** * Returns the filter associated with listed filtering aliases. * <p> diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1404b61b8e..de13eb1097 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -781,10 +781,14 @@ public class InternalEngine extends Engine { // we need to fail the engine. 
it might have already been failed before // but we are double-checking it's failed and closed if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - failEngine("already closed by tragic event", indexWriter.getTragicException()); + failEngine("already closed by tragic event on the index writer", indexWriter.getTragicException()); + } else if (translog.isOpen() == false && translog.getTragicException() != null) { + failEngine("already closed by tragic event on the translog", translog.getTragicException()); } return true; - } else if (t != null && indexWriter.isOpen() == false && indexWriter.getTragicException() == t) { + } else if (t != null && + ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t) + || (translog.isOpen() == false && translog.getTragicException() == t))) { // this spot on - we are handling the tragic event exception here so we have to fail the engine // right away failEngine(source, t); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java index 47c4372016..54c6ef20e3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java @@ -19,16 +19,9 @@ package org.elasticsearch.index.mapper; -public class ContentPath { +public final class ContentPath { - public enum Type { - JUST_NAME, - FULL, - } - - private Type pathType; - - private final char delimiter; + private static final char DELIMITER = '.'; private final StringBuilder sb; @@ -47,7 +40,6 @@ public class ContentPath { * number of path elements to not be included in {@link #pathAsText(String)}. 
*/ public ContentPath(int offset) { - this.delimiter = '.'; this.sb = new StringBuilder(); this.offset = offset; reset(); @@ -71,26 +63,11 @@ public class ContentPath { } public String pathAsText(String name) { - if (pathType == Type.JUST_NAME) { - return name; - } - return fullPathAsText(name); - } - - public String fullPathAsText(String name) { sb.setLength(0); for (int i = offset; i < index; i++) { - sb.append(path[i]).append(delimiter); + sb.append(path[i]).append(DELIMITER); } sb.append(name); return sb.toString(); } - - public Type pathType() { - return pathType; - } - - public void pathType(Type type) { - this.pathType = type; - } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index c4fec8cf09..333cda459f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -113,11 +114,11 @@ public class DocumentMapper implements ToXContent { private final MapperService mapperService; private final String type; - private final StringAndBytesText typeText; + private final Text typeText; private volatile CompressedXContent mappingSource; - 
private final Mapping mapping; + private volatile Mapping mapping; private final DocumentParser documentParser; @@ -137,7 +138,7 @@ public class DocumentMapper implements ToXContent { ReentrantReadWriteLock mappingLock) { this.mapperService = mapperService; this.type = rootObjectMapper.name(); - this.typeText = new StringAndBytesText(this.type); + this.typeText = new Text(this.type); this.mapping = new Mapping( Version.indexCreated(indexSettings), rootObjectMapper, @@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent { mapperService.addMappers(type, objectMappers, fieldMappers); } - public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { + public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { try (ReleasableLock lock = mappingWriteLock.acquire()) { mapperService.checkMappersCompatibility(type, mapping, updateAllTypes); - final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes); - this.mapping.merge(mapping, mergeResult); + // do the merge even if simulate == false so that we get exceptions + Mapping merged = this.mapping.merge(mapping, updateAllTypes); if (simulate == false) { - addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes); + this.mapping = merged; + Collection<ObjectMapper> objectMappers = new ArrayList<>(); + Collection<FieldMapper> fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers)); + MapperUtils.collect(merged.root, objectMappers, fieldMappers); + addMappers(objectMappers, fieldMappers, updateAllTypes); refreshSource(); } - return mergeResult; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index b0ad972d57..bb1749d233 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -234,9 +234,6 @@ 
class DocumentParser implements Closeable { nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE)); } - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(mapper.pathType()); - // if we are at the end of the previous object, advance if (token == XContentParser.Token.END_OBJECT) { token = parser.nextToken(); @@ -267,12 +264,11 @@ class DocumentParser implements Closeable { if (update == null) { update = newUpdate; } else { - MapperUtils.merge(update, newUpdate); + update = update.merge(newUpdate, false); } } } // restore the enable path flag - context.path().pathType(origPathType); if (nested.isNested()) { ParseContext.Document nestedDoc = context.doc(); ParseContext.Document parentDoc = nestedDoc.getParent(); @@ -341,7 +337,7 @@ class DocumentParser implements Closeable { context.path().remove(); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); if (builder == null) { - builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType()); + builder = MapperBuilders.object(currentFieldName).enabled(true); // if this is a non root object, then explicitly set the dynamic behavior if set if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); @@ -610,7 +606,7 @@ class DocumentParser implements Closeable { return null; } final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); - final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName)); + final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().pathAsText(currentFieldName)); Mapper.Builder builder = null; if (existingFieldType != null) { // create a builder of the same type @@ -695,7 +691,7 @@ class 
DocumentParser implements Closeable { if (paths.length > 1) { ObjectMapper parent = context.root(); for (int i = 0; i < paths.length-1; i++) { - mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i])); + mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i])); if (mapper == null) { // One mapping is missing, check if we are allowed to create a dynamic one. ObjectMapper.Dynamic dynamic = parent.dynamic(); @@ -713,12 +709,12 @@ class DocumentParser implements Closeable { if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(parent.dynamic()); } - builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType()); + builder = MapperBuilders.object(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { - throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`"); + throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`"); } break; case FALSE: @@ -759,7 +755,7 @@ class DocumentParser implements Closeable { private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { final Mapper update = parseObjectOrField(context, mapper); if (update != null) { - MapperUtils.merge(mapper, update); + mapper = (M) mapper.merge(update, false); } return mapper; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index ced3f08b22..30df3562ae 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Locale; import java.util.stream.StreamSupport; -public abstract class FieldMapper extends Mapper { +public abstract class FieldMapper extends Mapper implements Cloneable { public abstract static class Builder<T extends Builder, Y extends FieldMapper> extends Mapper.Builder<T, Y> { @@ -64,10 +64,10 @@ public abstract class FieldMapper extends Mapper { protected final MultiFields.Builder multiFieldsBuilder; protected CopyTo copyTo; - protected Builder(String name, MappedFieldType fieldType) { + protected Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) { super(name); this.fieldType = fieldType.clone(); - this.defaultFieldType = fieldType.clone(); + this.defaultFieldType = defaultFieldType.clone(); this.defaultOptions = fieldType.indexOptions(); // we have to store it the fieldType is mutable multiFieldsBuilder = new MultiFields.Builder(); } @@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper { * if the fieldType has a non-null option we are all good it might have been set through a different * call. 
*/ - final IndexOptions options = getDefaultIndexOption(); - assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing"; + IndexOptions options = getDefaultIndexOption(); + if (options == IndexOptions.NONE) { + // can happen when an existing type on the same index has disabled indexing + // since we inherit the default field type from the first mapper that is + // created on an index + throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index"); + } fieldType.setIndexOptions(options); } } else { @@ -202,11 +207,6 @@ public abstract class FieldMapper extends Mapper { return this; } - public T multiFieldPathType(ContentPath.Type pathType) { - multiFieldsBuilder.pathType(pathType); - return builder; - } - public T addMultiField(Mapper.Builder mapperBuilder) { multiFieldsBuilder.add(mapperBuilder); return builder; @@ -237,7 +237,7 @@ public abstract class FieldMapper extends Mapper { } protected String buildFullName(BuilderContext context) { - return context.path().fullPathAsText(name); + return context.path().pathAsText(name); } protected void setupFieldType(BuilderContext context) { @@ -270,7 +270,7 @@ public abstract class FieldMapper extends Mapper { protected MappedFieldTypeReference fieldTypeRef; protected final MappedFieldType defaultFieldType; - protected final MultiFields multiFields; + protected MultiFields multiFields; protected CopyTo copyTo; protected final boolean indexCreatedBefore2x; @@ -359,26 +359,41 @@ public abstract class FieldMapper extends Mapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected FieldMapper clone() { + try { + return (FieldMapper) super.clone(); + } catch (CloneNotSupportedException e) { + throw new AssertionError(e); + } + } + + @Override + public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + FieldMapper merged = clone(); + merged.doMerge(mergeWith, updateAllTypes); + return 
merged; + } + + /** + * Merge changes coming from {@code mergeWith} in place. + * @param updateAllTypes TODO + */ + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof FieldMapper) { mergedType = ((FieldMapper) mergeWith).contentType(); } - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); - // different types, return - return; + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); } FieldMapper fieldMergeWith = (FieldMapper) mergeWith; - multiFields.merge(mergeWith, mergeResult); + multiFields = multiFields.merge(fieldMergeWith.multiFields); - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - // apply changeable values - MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); - fieldType.freeze(); - fieldTypeRef.set(fieldType); - this.copyTo = fieldMergeWith.copyTo; - } + // apply changeable values + MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); + fieldType.freeze(); + fieldTypeRef.set(fieldType); + this.copyTo = fieldMergeWith.copyTo; } @Override @@ -520,18 +535,12 @@ public abstract class FieldMapper extends Mapper { public static class MultiFields { public static MultiFields empty() { - return new MultiFields(ContentPath.Type.FULL, ImmutableOpenMap.<String, FieldMapper>of()); + return new MultiFields(ImmutableOpenMap.<String, FieldMapper>of()); } public static class Builder { private final ImmutableOpenMap.Builder<String, Mapper.Builder> mapperBuilders = ImmutableOpenMap.builder(); - private ContentPath.Type pathType = ContentPath.Type.FULL; - - public Builder pathType(ContentPath.Type pathType) { - this.pathType = 
pathType; - return this; - } public Builder add(Mapper.Builder builder) { mapperBuilders.put(builder.name(), builder); @@ -540,13 +549,9 @@ public abstract class FieldMapper extends Mapper { @SuppressWarnings("unchecked") public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) { - if (pathType == ContentPath.Type.FULL && mapperBuilders.isEmpty()) { + if (mapperBuilders.isEmpty()) { return empty(); - } else if (mapperBuilders.isEmpty()) { - return new MultiFields(pathType, ImmutableOpenMap.<String, FieldMapper>of()); } else { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(mainFieldBuilder.name()); ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders; for (ObjectObjectCursor<String, Mapper.Builder> cursor : this.mapperBuilders) { @@ -557,26 +562,25 @@ public abstract class FieldMapper extends Mapper { mapperBuilders.put(key, mapper); } context.path().remove(); - context.path().pathType(origPathType); ImmutableOpenMap.Builder<String, FieldMapper> mappers = mapperBuilders.cast(); - return new MultiFields(pathType, mappers.build()); + return new MultiFields(mappers.build()); } } } - private final ContentPath.Type pathType; - private volatile ImmutableOpenMap<String, FieldMapper> mappers; + private final ImmutableOpenMap<String, FieldMapper> mappers; - public MultiFields(ContentPath.Type pathType, ImmutableOpenMap<String, FieldMapper> mappers) { - this.pathType = pathType; - this.mappers = mappers; + private MultiFields(ImmutableOpenMap<String, FieldMapper> mappers) { + ImmutableOpenMap.Builder<String, FieldMapper> builder = new ImmutableOpenMap.Builder<>(); // we disable the all in multi-field mappers - for (ObjectCursor<FieldMapper> cursor : mappers.values()) { + for (ObjectObjectCursor<String, FieldMapper> cursor : mappers) { FieldMapper mapper = cursor.value; if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) 
mapper).unsetIncludeInAll(); + mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); } + builder.put(cursor.key, mapper); } + this.mappers = builder.build(); } public void parse(FieldMapper mainField, ParseContext context) throws IOException { @@ -587,58 +591,33 @@ public abstract class FieldMapper extends Mapper { context = context.createMultiFieldContext(); - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - context.path().add(mainField.simpleName()); for (ObjectCursor<FieldMapper> cursor : mappers.values()) { cursor.value.parse(context); } context.path().remove(); - context.path().pathType(origPathType); } - // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge - public void merge(Mapper mergeWith, MergeResult mergeResult) { - FieldMapper mergeWithMultiField = (FieldMapper) mergeWith; - - List<FieldMapper> newFieldMappers = null; - ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = null; + public MultiFields merge(MultiFields mergeWith) { + ImmutableOpenMap.Builder<String, FieldMapper> newMappersBuilder = ImmutableOpenMap.builder(mappers); - for (ObjectCursor<FieldMapper> cursor : mergeWithMultiField.multiFields.mappers.values()) { + for (ObjectCursor<FieldMapper> cursor : mergeWith.mappers.values()) { FieldMapper mergeWithMapper = cursor.value; - Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); + FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); if (mergeIntoMapper == null) { - // no mapping, simply add it if not simulating - if (!mergeResult.simulate()) { - // we disable the all in multi-field mappers - if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); - } - if (newMappersBuilder == null) { - newMappersBuilder = ImmutableOpenMap.builder(mappers); - } - 
newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); - if (mergeWithMapper instanceof FieldMapper) { - if (newFieldMappers == null) { - newFieldMappers = new ArrayList<>(2); - } - newFieldMappers.add(mergeWithMapper); - } + // we disable the all in multi-field mappers + if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { + mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); } + newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); } else { - mergeIntoMapper.merge(mergeWithMapper, mergeResult); + FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false); + newMappersBuilder.put(merged.simpleName(), merged); // override previous definition } } - // first add all field mappers - if (newFieldMappers != null) { - mergeResult.addFieldMappers(newFieldMappers); - } - // now publish mappers - if (newMappersBuilder != null) { - mappers = newMappersBuilder.build(); - } + ImmutableOpenMap<String, FieldMapper> mappers = newMappersBuilder.build(); + return new MultiFields(mappers); } public Iterator<Mapper> iterator() { @@ -646,9 +625,6 @@ public abstract class FieldMapper extends Mapper { } public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (pathType != ContentPath.Type.FULL) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (!mappers.isEmpty()) { // sort the mappers so we get consistent serialization format Mapper[] sortedMappers = mappers.values().toArray(Mapper.class); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 33a4dabd3b..4c3aa3c56b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> { /** Returns the canonical name which 
uniquely identifies the mapper against other mappers in a type. */ public abstract String name(); - public abstract void merge(Mapper mergeWith, MergeResult mergeResult); + /** Return the merge of {@code mergeWith} into this. + * Both {@code this} and {@code mergeWith} will be left unmodified. */ + public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 938f610d6d..37e99e8c90 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.search.Queries; @@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock()); private volatile FieldTypeLookup fieldTypes; - private volatile ImmutableOpenMap<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.of(); + private volatile Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(); private boolean hasNested = false; // updated dynamically to true when a nested object is added private final DocumentMapperParser documentParser; @@ -199,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) { if (DEFAULT_MAPPING.equals(type)) { // verify we can parse it + // NOTE: 
never apply the default here DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource); // still add it as a document mapper so we have it registered and, for example, persisted back into // the cluster meta data if needed, or checked for existence @@ -212,74 +212,69 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } return mapper; } else { - return merge(parse(type, mappingSource, applyDefault), updateAllTypes); + try (ReleasableLock lock = mappingWriteLock.acquire()) { + // only apply the default mapping if we don't have the type yet + applyDefault &= mappers.containsKey(type) == false; + return merge(parse(type, mappingSource, applyDefault), updateAllTypes); + } } } // never expose this to the outside world, we need to reparse the doc mapper so we get fresh // instances of field mappers to properly remove existing doc mapper private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) { - try (ReleasableLock lock = mappingWriteLock.acquire()) { - if (mapper.type().length() == 0) { - throw new InvalidTypeNameException("mapping type name is empty"); - } - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]"); - } - if (mapper.type().charAt(0) == '_') { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'"); - } - if (mapper.type().contains("#")) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it"); - } - if (mapper.type().contains(",")) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); + if (mapper.type().length() == 0) { + throw new InvalidTypeNameException("mapping type name is empty"); + } + if 
(indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]"); + } + if (mapper.type().charAt(0) == '_') { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'"); + } + if (mapper.type().contains("#")) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it"); + } + if (mapper.type().contains(",")) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); + } + if (mapper.type().equals(mapper.parentFieldMapper().type())) { + throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); + } + if (typeNameStartsWithIllegalDot(mapper)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { + throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'"); + } else { + logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type()); } - if (mapper.type().equals(mapper.parentFieldMapper().type())) { - throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); + } + // we can add new field/object mappers while the old ones are there + // since we get new instances of those, and when we remove, we remove + // by instance equality + DocumentMapper oldMapper = mappers.get(mapper.type()); + + if (oldMapper != null) { + oldMapper.merge(mapper.mapping(), false, updateAllTypes); + return oldMapper; + } else { + Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility( + mapper.type(), mapper.mapping(), updateAllTypes); + Collection<ObjectMapper> newObjectMappers = newMappers.v1(); + 
Collection<FieldMapper> newFieldMappers = newMappers.v2(); + addMappers(mapper.type(), newObjectMappers, newFieldMappers); + + for (DocumentTypeListener typeListener : typeListeners) { + typeListener.beforeCreate(mapper); } - if (typeNameStartsWithIllegalDot(mapper)) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { - throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'"); - } else { - logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type()); - } - } - // we can add new field/object mappers while the old ones are there - // since we get new instances of those, and when we remove, we remove - // by instance equality - DocumentMapper oldMapper = mappers.get(mapper.type()); - - if (oldMapper != null) { - // simulate first - MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes); - if (result.hasConflicts()) { - throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}"); - } - // then apply for real - result = oldMapper.merge(mapper.mapping(), false, updateAllTypes); - assert result.hasConflicts() == false; // we already simulated - return oldMapper; - } else { - Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility( - mapper.type(), mapper.mapping(), updateAllTypes); - Collection<ObjectMapper> newObjectMappers = newMappers.v1(); - Collection<FieldMapper> newFieldMappers = newMappers.v2(); - addMappers(mapper.type(), newObjectMappers, newFieldMappers); - - for (DocumentTypeListener typeListener : typeListeners) { - typeListener.beforeCreate(mapper); - } - mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map(); - if (mapper.parentFieldMapper().active()) { - Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1); - newParentTypes.addAll(parentTypes); - 
newParentTypes.add(mapper.parentFieldMapper().type()); - parentTypes = unmodifiableSet(newParentTypes); - } - assert assertSerialization(mapper); - return mapper; + mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map(); + if (mapper.parentFieldMapper().active()) { + Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1); + newParentTypes.addAll(parentTypes); + newParentTypes.add(mapper.parentFieldMapper().type()); + parentTypes = unmodifiableSet(newParentTypes); } + assert assertSerialization(mapper); + return mapper; } } @@ -300,19 +295,56 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return true; } + private void checkFieldUniqueness(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) { + final Set<String> objectFullNames = new HashSet<>(); + for (ObjectMapper objectMapper : objectMappers) { + final String fullPath = objectMapper.fullPath(); + if (objectFullNames.add(fullPath) == false) { + throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]"); + } + } + + if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) { + // Before 3.0 some metadata mappers are also registered under the root object mapper + // So we avoid false positives by deduplicating mappers + // given that we check exact equality, this would still catch the case that a mapper + // is defined under the root object + Collection<FieldMapper> uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>()); + uniqueFieldMappers.addAll(fieldMappers); + fieldMappers = uniqueFieldMappers; + } + + final Set<String> fieldNames = new HashSet<>(); + for (FieldMapper fieldMapper : fieldMappers) { + final String name = fieldMapper.name(); + if (objectFullNames.contains(name)) { + throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]"); + } else if 
(fieldNames.add(name) == false) { + throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]"); + } + } + } + protected void checkMappersCompatibility(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) { assert mappingLock.isWriteLockedByCurrentThread(); + + checkFieldUniqueness(type, objectMappers, fieldMappers); + for (ObjectMapper newObjectMapper : objectMappers) { ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); if (existingObjectMapper != null) { - MergeResult result = new MergeResult(true, updateAllTypes); - existingObjectMapper.merge(newObjectMapper, result); - if (result.hasConflicts()) { - throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" + - Arrays.toString(result.buildConflicts())); - } + // simulate a merge and ignore the result, we are just interested + // in exceptions here + existingObjectMapper.merge(newObjectMapper, updateAllTypes); } } + + for (FieldMapper fieldMapper : fieldMappers) { + if (fullPathObjectMappers.containsKey(fieldMapper.name())) { + throw new IllegalArgumentException("Field [" + fieldMapper.name() + "] is defined as a field in mapping [" + type + "] but this name is already used for an object in other types"); + } + } + fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes); } @@ -320,9 +352,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { String type, Mapping mapping, boolean updateAllTypes) { List<ObjectMapper> objectMappers = new ArrayList<>(); List<FieldMapper> fieldMappers = new ArrayList<>(); - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { - fieldMappers.add(metadataMapper); - } + Collections.addAll(fieldMappers, mapping.metadataMappers); MapperUtils.collect(mapping.root, objectMappers, fieldMappers); checkMappersCompatibility(type, 
objectMappers, fieldMappers, updateAllTypes); return new Tuple<>(objectMappers, fieldMappers); @@ -330,14 +360,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable { protected void addMappers(String type, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) { assert mappingLock.isWriteLockedByCurrentThread(); - ImmutableOpenMap.Builder<String, ObjectMapper> fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers); + Map<String, ObjectMapper> fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers); for (ObjectMapper objectMapper : objectMappers) { fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper); if (objectMapper.nested().isNested()) { hasNested = true; } } - this.fullPathObjectMappers = fullPathObjectMappers.build(); + this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers); this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java index d46c32a932..04508827f7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java @@ -27,52 +27,6 @@ import java.util.Collection; public enum MapperUtils { ; - private static MergeResult newStrictMergeResult() { - return new MergeResult(false, false) { - - @Override - public void addFieldMappers(Collection<FieldMapper> fieldMappers) { - // no-op - } - - @Override - public void addObjectMappers(Collection<ObjectMapper> objectMappers) { - // no-op - } - - @Override - public Collection<FieldMapper> getNewFieldMappers() { - throw new UnsupportedOperationException("Strict merge result does not support new field mappers"); - } - - @Override - public Collection<ObjectMapper> getNewObjectMappers() { - throw new UnsupportedOperationException("Strict merge result 
does not support new object mappers"); - } - - @Override - public void addConflict(String mergeFailure) { - throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure); - } - }; - } - - /** - * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only - * merges mappings, not lookup structures. Conflicts are returned as exceptions. - */ - public static void merge(Mapper mergeInto, Mapper mergeWith) { - mergeInto.merge(mergeWith, newStrictMergeResult()); - } - - /** - * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only - * merges mappings, not lookup structures. Conflicts are returned as exceptions. - */ - public static void merge(Mapping mergeInto, Mapping mergeWith) { - mergeInto.merge(mergeWith, newStrictMergeResult()); - } - /** Split mapper and its descendants into object and field mappers. */ public static void collect(Mapper mapper, Collection<ObjectMapper> objectMappers, Collection<FieldMapper> fieldMappers) { if (mapper instanceof RootObjectMapper) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java index bac4216255..d33a97a415 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.List; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -41,25 +43,27 @@ import static java.util.Collections.unmodifiableMap; */ public final class Mapping implements ToXContent { - public static final List<String> LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", 
"_parent", "_routing", "_timestamp", "_ttl"); + // Set of fields that were included into the root object mapper before 2.0 + public static final Set<String> LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>( + Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl"))); final Version indexCreated; final RootObjectMapper root; final MetadataFieldMapper[] metadataMappers; final Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap; - volatile Map<String, Object> meta; + final Map<String, Object> meta; public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map<String, Object> meta) { this.indexCreated = indexCreated; - this.root = rootObjectMapper; this.metadataMappers = metadataMappers; Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); for (MetadataFieldMapper metadataMapper : metadataMappers) { if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) { - root.putMapper(metadataMapper); + rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper); } metadataMappersMap.put(metadataMapper.getClass(), metadataMapper); } + this.root = rootObjectMapper; // keep root mappers sorted for consistent serialization Arrays.sort(metadataMappers, new Comparator<Mapper>() { @Override @@ -90,21 +94,20 @@ public final class Mapping implements ToXContent { } /** @see DocumentMapper#merge(Mapping, boolean, boolean) */ - public void merge(Mapping mergeWith, MergeResult mergeResult) { - assert metadataMappers.length == mergeWith.metadataMappers.length; - - root.merge(mergeWith.root, mergeResult); - for (MetadataFieldMapper metadataMapper : metadataMappers) { - MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass()); - if (mergeWithMetadataMapper != null) { - metadataMapper.merge(mergeWithMetadataMapper, mergeResult); + 
public Mapping merge(Mapping mergeWith, boolean updateAllTypes) { + RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes); + Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap); + for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) { + MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass()); + MetadataFieldMapper merged; + if (mergeInto == null) { + merged = metaMergeWith; + } else { + merged = mergeInto.merge(metaMergeWith, updateAllTypes); } + mergedMetaDataMappers.put(merged.getClass(), merged); } - - if (mergeResult.simulate() == false) { - // let the merge with attributes to override the attributes - meta = mergeWith.meta; - } + return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java b/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java deleted file mode 100644 index f5698a0ed1..0000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -/** A container for tracking results of a mapping merge. */ -public class MergeResult { - - private final boolean simulate; - private final boolean updateAllTypes; - - private final List<String> conflicts = new ArrayList<>(); - private final List<FieldMapper> newFieldMappers = new ArrayList<>(); - private final List<ObjectMapper> newObjectMappers = new ArrayList<>(); - - public MergeResult(boolean simulate, boolean updateAllTypes) { - this.simulate = simulate; - this.updateAllTypes = updateAllTypes; - } - - public void addFieldMappers(Collection<FieldMapper> fieldMappers) { - assert simulate() == false; - newFieldMappers.addAll(fieldMappers); - } - - public void addObjectMappers(Collection<ObjectMapper> objectMappers) { - assert simulate() == false; - newObjectMappers.addAll(objectMappers); - } - - public Collection<FieldMapper> getNewFieldMappers() { - return newFieldMappers; - } - - public Collection<ObjectMapper> getNewObjectMappers() { - return newObjectMappers; - } - - public boolean simulate() { - return simulate; - } - - public boolean updateAllTypes() { - return updateAllTypes; - } - - public void addConflict(String mergeFailure) { - conflicts.add(mergeFailure); - } - - public boolean hasConflicts() { - return conflicts.isEmpty() == false; - } - - public String[] buildConflicts() { - return conflicts.toArray(Strings.EMPTY_ARRAY); - } -}
\ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index fc6d1fa9e1..622c7729dd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -51,8 +51,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { } public abstract static class Builder<T extends Builder, Y extends MetadataFieldMapper> extends FieldMapper.Builder<T, Y> { - public Builder(String name, MappedFieldType fieldType) { - super(name, fieldType); + public Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) { + super(name, fieldType, defaultFieldType); } } @@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { */ public abstract void postParse(ParseContext context) throws IOException; + @Override + public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes); + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index edf75621c1..0a88e29c8d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -595,7 +595,7 @@ public abstract class ParseContext { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = mapper; } else { - MapperUtils.merge(dynamicMappingsUpdate, mapper); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index ed8314c6f7..aa35e699b2 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -128,7 +128,7 @@ public class ParsedDocument { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = update; } else { - MapperUtils.merge(dynamicMappingsUpdate, update); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 7468f4fb2f..0ee311678e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -72,7 +72,7 @@ public class BinaryFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder<Builder, BinaryFieldMapper> { public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index cd76fdbb04..e381bc9c60 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -72,7 +72,7 @@ public class BooleanFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder<Builder, BooleanFieldMapper> { public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); this.builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 61b22a1ee2..44b4cbcd35 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper { setupFieldType(context); ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 5b4df635a3..69177401db 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -356,7 +356,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp * @param name of the completion field to build */ public Builder(String name) { - super(name, new CompletionFieldType()); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } @@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - this.maxInputLength = fieldMergeWith.maxInputLength; - } + this.maxInputLength = fieldMergeWith.maxInputLength; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 27b96b27a4..7a99e6b50c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper { fieldType.setNullValue(nullValue); DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (DateFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 0e512bf428..861d33e560 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { setupFieldType(context); DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 9a607ffd41..ad88c745df 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -81,8 +81,7 @@ public class FloatFieldMapper extends 
NumberFieldMapper { setupFieldType(context); FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 868cfeb438..1899549811 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index 4130c90258..9d9557c41f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper { setupFieldType(context); LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (LongFieldMapper) 
fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 87a63de99e..ed537aa7e5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -66,7 +66,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM private Boolean coerce; public Builder(String name, MappedFieldType fieldType, int defaultPrecisionStep) { - super(name, fieldType); + super(name, fieldType, fieldType); this.fieldType.setNumericPrecisionStep(defaultPrecisionStep); } @@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } @Override - public void includeInAll(Boolean includeInAll) { + protected NumberFieldMapper clone() { + return (NumberFieldMapper) super.clone(); + } + + @Override + public Mapper includeInAll(Boolean includeInAll) { if (includeInAll != null) { - this.includeInAll = includeInAll; + NumberFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { + public Mapper includeInAllIfNotSet(Boolean includeInAll) { if (includeInAll != null && this.includeInAll == null) { - this.includeInAll = includeInAll; + NumberFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public Mapper unsetIncludeInAll() { + if (includeInAll != null) { + NumberFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } } @Override @@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } @Override - public void 
merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - this.includeInAll = nfmMergeWith.includeInAll; - if (nfmMergeWith.ignoreMalformed.explicit()) { - this.ignoreMalformed = nfmMergeWith.ignoreMalformed; - } - if (nfmMergeWith.coerce.explicit()) { - this.coerce = nfmMergeWith.coerce; - } + this.includeInAll = nfmMergeWith.includeInAll; + if (nfmMergeWith.ignoreMalformed.explicit()) { + this.ignoreMalformed = nfmMergeWith.ignoreMalformed; + } + if (nfmMergeWith.coerce.explicit()) { + this.coerce = nfmMergeWith.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 81ed6cc3ba..e455959c53 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper { ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 0a921ad85e..08582c6599 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ 
b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; @@ -99,7 +98,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc protected int ignoreAbove = Defaults.IGNORE_ABOVE; public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } @@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc StringFieldMapper fieldMapper = new StringFieldMapper( name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return fieldMapper.includeInAll(includeInAll); } } @@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } @Override - public void includeInAll(Boolean includeInAll) { + protected StringFieldMapper clone() { + return (StringFieldMapper) super.clone(); + } + + @Override + public StringFieldMapper includeInAll(Boolean includeInAll) { if (includeInAll != null) { - this.includeInAll = includeInAll; + StringFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { + public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) { if (includeInAll != null && this.includeInAll == null) { - this.includeInAll = includeInAll; + StringFieldMapper clone = clone(); + clone.includeInAll = 
includeInAll; + return clone; + } else { + return this; } } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public StringFieldMapper unsetIncludeInAll() { + if (includeInAll != null) { + StringFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } } @Override @@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - if (!mergeResult.simulate()) { - this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; - this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; + this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java index 8348892e44..a485c3727f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost; @@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { TokenCountFieldMapper fieldMapper = new 
TokenCountFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), analyzer, multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override @@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - if (!mergeResult.simulate()) { - this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index e530243657..f6bd4946eb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -61,7 +61,6 @@ public class TypeParsers { @Override public Mapper.Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { - ContentPath.Type pathType = null; FieldMapper.Builder mainFieldBuilder = null; List<FieldMapper.Builder> fields = null; String firstType = null; @@ -70,10 +69,7 @@ public class TypeParsers { Map.Entry<String, Object> entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - pathType = parsePathType(name, fieldNode.toString()); - iterator.remove(); - } else if (fieldName.equals("fields")) { 
+ if (fieldName.equals("fields")) { Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode; for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) { Map.Entry<String, Object> entry1 = fieldsIterator.next(); @@ -132,17 +128,10 @@ public class TypeParsers { } } - if (fields != null && pathType != null) { + if (fields != null) { for (Mapper.Builder field : fields) { mainFieldBuilder.addMultiField(field); } - mainFieldBuilder.multiFieldPathType(pathType); - } else if (fields != null) { - for (Mapper.Builder field : fields) { - mainFieldBuilder.addMultiField(field); - } - } else if (pathType != null) { - mainFieldBuilder.multiFieldPathType(pathType); } return mainFieldBuilder; } @@ -337,10 +326,7 @@ public class TypeParsers { public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) { parserContext = parserContext.createMultiFieldContext(parserContext); - if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.multiFieldPathType(parsePathType(name, propNode.toString())); - return true; - } else if (propName.equals("fields")) { + if (propName.equals("fields")) { final Map<String, Object> multiFieldsPropNodes; @@ -457,17 +443,6 @@ public class TypeParsers { } } - public static ContentPath.Type parsePathType(String name, String path) throws MapperParsingException { - path = Strings.toUnderscoreCase(path); - if ("just_name".equals(path)) { - return ContentPath.Type.JUST_NAME; - } else if ("full".equals(path)) { - return ContentPath.Type.FULL; - } else { - throw new MapperParsingException("wrong value for pathType [" + path + "] for object [" + name + "]"); - } - } - @SuppressWarnings("unchecked") public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) { FieldMapper.CopyTo.Builder copyToBuilder = new 
FieldMapper.CopyTo.Builder(); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 0b57d866dd..0bbe2fe8f1 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -33,12 +33,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; @@ -74,7 +72,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public static class Defaults { - public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; public static final boolean ENABLE_LATLON = false; public static final boolean ENABLE_GEOHASH = false; public static final boolean ENABLE_GEOHASH_PREFIX = false; @@ -83,7 +80,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public abstract static class Builder<T extends Builder, Y extends BaseGeoPointFieldMapper> extends FieldMapper.Builder<T, Y> { - protected ContentPath.Type pathType = Defaults.PATH_TYPE; protected boolean enableLatLon = Defaults.ENABLE_LATLON; @@ -98,7 +94,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr protected 
Boolean ignoreMalformed; public Builder(String name, GeoPointFieldType fieldType) { - super(name, fieldType); + super(name, fieldType, fieldType); } @Override @@ -107,12 +103,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } @Override - public T multiFieldPathType(ContentPath.Type pathType) { - this.pathType = pathType; - return builder; - } - - @Override public T fieldDataSettings(Settings settings) { this.fieldDataSettings = settings; return builder; @@ -159,13 +149,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, - Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo); public Y build(Mapper.BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType; DoubleFieldMapper latMapper = null; @@ -191,9 +178,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix); } context.path().remove(); - context.path().pathType(origPathType); - return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType, + return build(context, name, fieldType, defaultFieldType, context.indexSettings(), latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo); } } @@ -365,17 +351,14 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr protected final DoubleFieldMapper 
lonMapper; - protected final ContentPath.Type pathType; - protected final StringFieldMapper geoHashMapper; protected Explicit<Boolean> ignoreMalformed; protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); - this.pathType = pathType; this.latMapper = latMapper; this.lonMapper = lonMapper; this.geoHashMapper = geoHashMapper; @@ -388,17 +371,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gpfmMergeWith.ignoreMalformed.explicit()) { - this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; - } + if (gpfmMergeWith.ignoreMalformed.explicit()) { + this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; } } @@ -441,8 +418,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr @Override public Mapper parse(ParseContext context) throws IOException { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(simpleName()); GeoPoint sparse = context.parseExternalValue(GeoPoint.class); @@ -487,7 +462,6 @@ public abstract class BaseGeoPointFieldMapper 
extends FieldMapper implements Arr } context.path().remove(); - context.path().pathType(origPathType); return null; } @@ -512,9 +486,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || pathType != Defaults.PATH_TYPE) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) { builder.field("lat_lon", fieldType().isLatLonEnabled()); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 286aca2972..fa61669e80 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -81,12 +80,12 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { @Override public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType, - MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, + MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> 
ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); setupFieldType(context); - return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, + return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); } @@ -104,9 +103,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { } public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) { - super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields, + super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java index 84e6bde07a..735baa8853 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java @@ -35,11 +35,9 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import 
org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField; @@ -111,14 +109,14 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement @Override public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType, - MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, + MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); setupFieldType(context); fieldType.setHasDocValues(false); defaultFieldType.setHasDocValues(false); - return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, + return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo); } @@ -288,32 +286,27 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement protected Explicit<Boolean> coerce; public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit<Boolean> ignoreMalformed, Explicit<Boolean> coerce, CopyTo copyTo) { - super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields, + super(simpleName, fieldType, defaultFieldType, 
indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); this.coerce = coerce; } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith; if (gpfmMergeWith.coerce.explicit()) { if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) { - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]"); + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]"); } } - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gpfmMergeWith.coerce.explicit()) { - this.coerce = gpfmMergeWith.coerce; - } + if (gpfmMergeWith.coerce.explicit()) { + this.coerce = gpfmMergeWith.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 7e784324f3..1ba49e64d8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -121,7 +120,7 @@ public class GeoShapeFieldMapper extends FieldMapper { private Boolean coerce; public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, 
Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } @Override @@ -185,7 +184,7 @@ public class GeoShapeFieldMapper extends FieldMapper { builder.fieldType().setDistanceErrorPct(Double.parseDouble(fieldNode.toString())); iterator.remove(); } else if (Names.ORIENTATION.equals(fieldName)) { - builder.fieldType().setOrientation(ShapeBuilder.orientationFromString(fieldNode.toString())); + builder.fieldType().setOrientation(ShapeBuilder.Orientation.fromString(fieldNode.toString())); iterator.remove(); } else if (Names.STRATEGY.equals(fieldName)) { builder.fieldType().setStrategyName(fieldNode.toString()); @@ -193,7 +192,8 @@ public class GeoShapeFieldMapper extends FieldMapper { } else if (Names.COERCE.equals(fieldName)) { builder.coerce(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) { + } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) + && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode)); iterator.remove(); } @@ -284,6 +284,7 @@ public class GeoShapeFieldMapper extends FieldMapper { termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName()); termStrategy.setDistErrPct(distanceErrorPct()); defaultStrategy = resolveStrategy(strategyName); + defaultStrategy.setPointsOnly(pointsOnly); } @Override @@ -347,6 +348,9 @@ public class GeoShapeFieldMapper extends FieldMapper { public void setStrategyName(String strategyName) { checkIfFrozen(); this.strategyName = strategyName; + if (this.strategyName.equals(SpatialStrategy.TERM)) { + this.pointsOnly = true; + } } public boolean pointsOnly() { @@ -406,7 +410,6 @@ public class GeoShapeFieldMapper extends FieldMapper { public PrefixTreeStrategy resolveStrategy(String strategyName) { if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) { - recursiveStrategy.setPointsOnly(pointsOnly()); return recursiveStrategy; } if 
(SpatialStrategy.TERM.getStrategyName().equals(strategyName)) { @@ -446,7 +449,7 @@ public class GeoShapeFieldMapper extends FieldMapper { } shape = shapeBuilder.build(); } - if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) { + if (fieldType().pointsOnly() && !(shape instanceof Point)) { throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " + ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); } @@ -471,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gsfm.coerce.explicit()) { - this.coerce = gsfm.coerce; - } + if (gsfm.coerce.explicit()) { + this.coerce = gsfm.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 645c36a485..bcd094d2ae 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import 
org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper { public interface IncludeInAll { - void includeInAll(Boolean includeInAll); - - void includeInAllIfNotSet(Boolean includeInAll); - - void unsetIncludeInAll(); + /** + * If {@code includeInAll} is not null then return a copy of this mapper + * that will include values in the _all field according to {@code includeInAll}. + */ + Mapper includeInAll(Boolean includeInAll); + + /** + * If {@code includeInAll} is not null and not set on this mapper yet, then + * return a copy of this mapper that will include values in the _all field + * according to {@code includeInAll}. + */ + Mapper includeInAllIfNotSet(Boolean includeInAll); + + /** + * If {@code includeInAll} was already set on this mapper then return a copy + * of this mapper that has {@code includeInAll} not set. + */ + Mapper unsetIncludeInAll(); } public static final String NAME = "_all"; @@ -89,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabled = Defaults.ENABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); builder = this; indexName = Defaults.INDEX_NAME; } @@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) { - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); } - super.merge(mergeWith, mergeResult); + super.doMerge(mergeWith, updateAllTypes); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 7883415e59..e03439f3f5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -78,7 +78,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index 16b6c4c56d..0fe3e10bcb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -90,7 +89,7 @@ public class IdFieldMapper extends MetadataFieldMapper { private String path = Defaults.PATH; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index 962332b5c4..dbbf03b72e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -80,7 +79,7 @@ public class IndexFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = indexFieldMapperMergeWith.enabledState; - } + if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = indexFieldMapperMergeWith.enabledState; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 760259a180..65daef2a83 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -98,7 +97,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { private final MappedFieldType childJoinFieldType = Defaults.JOIN_FIELD_TYPE.clone(); public Builder(String documentType) { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); this.indexName = name; this.documentType = documentType; builder = this; 
@@ -371,11 +370,11 @@ public class ParentFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (Objects.equals(parentType, fieldMergeWith.parentType) == false) { - mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); + throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); } List<String> conflicts = new ArrayList<>(); @@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper { parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here if (childJoinFieldType != null) { // TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type. 
- childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false); + childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false); } - for (String conflict : conflicts) { - mergeResult.addConflict(conflict); + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Merge conflicts: " + conflicts); } - if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { + if (active()) { childJoinFieldType = fieldMergeWith.childJoinFieldType.clone(); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index 18d0645d2d..40b7e6871c 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; @@ -78,7 +77,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { private String path = Defaults.PATH; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } public Builder required(boolean required) { @@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index da3b8dbc5a..40bf9eb0c8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -29,13 +29,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -44,21 +41,17 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; -import java.io.BufferedInputStream; 
import java.io.IOException; -import java.io.InputStream; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * @@ -72,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static class Defaults { public static final String NAME = SourceFieldMapper.NAME; public static final boolean ENABLED = true; - public static final long COMPRESS_THRESHOLD = -1; - public static final String FORMAT = null; // default format is to use the one provided public static final MappedFieldType FIELD_TYPE = new SourceFieldType(); @@ -93,17 +84,11 @@ public class SourceFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; - private long compressThreshold = Defaults.COMPRESS_THRESHOLD; - - private Boolean compress = null; - - private String format = Defaults.FORMAT; - private String[] includes = null; private String[] excludes = null; public Builder() { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } public Builder enabled(boolean enabled) { @@ -111,21 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { return this; } - public Builder compress(boolean compress) { - this.compress = compress; - return this; - } - - public Builder compressThreshold(long compressThreshold) { - this.compressThreshold = compressThreshold; - return this; - } - - public Builder format(String format) { - this.format = format; - return this; - } - public Builder includes(String[] includes) { this.includes = includes; return this; @@ -138,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper { @Override public SourceFieldMapper build(BuilderContext context) { - return new 
SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings()); + return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings()); } } @@ -154,24 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper { if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - if (fieldNode != null) { - builder.compress(nodeBooleanValue(fieldNode)); - } - iterator.remove(); - } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - if (fieldNode != null) { - if (fieldNode instanceof Number) { - builder.compressThreshold(((Number) fieldNode).longValue()); - builder.compress(true); - } else { - builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes()); - builder.compress(true); - } - } - iterator.remove(); - } else if ("format".equals(fieldName)) { - builder.format(nodeStringValue(fieldNode, null)); + } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) { + // ignore on old indices, reject on and after 3.0 iterator.remove(); } else if (fieldName.equals("includes")) { List<Object> values = (List<Object>) fieldNode; @@ -242,30 +196,18 @@ public class SourceFieldMapper extends MetadataFieldMapper { /** indicates whether the source will always exist and be complete, for use by features like the update API */ private final boolean complete; - private Boolean compress; - private long compressThreshold; - private final String[] includes; private final String[] excludes; - private String format; - - private XContentType formatContentType; - private SourceFieldMapper(Settings indexSettings) { - this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings); + 
this(Defaults.ENABLED, null, null, indexSettings); } - private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold, - String[] includes, String[] excludes, Settings indexSettings) { + private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) { super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored. this.enabled = enabled; - this.compress = compress; - this.compressThreshold = compressThreshold; this.includes = includes; this.excludes = excludes; - this.format = format; - this.formatContentType = format == null ? null : XContentType.fromRestContentType(format); this.complete = enabled && includes == null && excludes == null; } @@ -321,71 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper { Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true); Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = bStream; - if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) { - streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - } - XContentType contentType = formatContentType; - if (contentType == null) { - contentType = mapTuple.v1(); - } - XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource); + XContentType contentType = mapTuple.v1(); + XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); - } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) { - if (compressThreshold == -1 || source.length() > compressThreshold) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType contentType = 
XContentFactory.xContentType(source); - if (formatContentType != null && formatContentType != contentType) { - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream)); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - } else { - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - source.writeTo(streamOutput); - streamOutput.close(); - } - source = bStream.bytes(); - // update the data in the context, so it can be compressed and stored compressed outside... - context.source(source); - } - } else if (formatContentType != null) { - // see if we need to convert the content type - Compressor compressor = CompressorFactory.compressor(source); - if (compressor != null) { - InputStream compressedStreamInput = compressor.streamInput(source.streamInput()); - if (compressedStreamInput.markSupported() == false) { - compressedStreamInput = new BufferedInputStream(compressedStreamInput); - } - XContentType contentType = XContentFactory.xContentType(compressedStreamInput); - if (contentType != formatContentType) { - // we need to reread and store back, compressed.... 
- BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } else { - compressedStreamInput.close(); - } - } else { - XContentType contentType = XContentFactory.xContentType(source); - if (contentType != formatContentType) { - // we need to reread and store back - // we need to reread and store back, compressed.... - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } - } } if (!source.hasArray()) { source = source.toBytesArray(); @@ -403,26 +285,13 @@ public class SourceFieldMapper extends MetadataFieldMapper { boolean includeDefaults = params.paramAsBoolean("include_defaults", false); // all are defaults, no need to write it at all - if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) { + if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) { return builder; } builder.startObject(contentType()); if (includeDefaults || enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) { - builder.field("format", format); - } - if (compress != null) { - builder.field("compress", compress); - } else if 
(includeDefaults) { - builder.field("compress", false); - } - if (compressThreshold != -1) { - builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString()); - } else if (includeDefaults) { - builder.field("compress_threshold", -1); - } if (includes != null) { builder.field("includes", includes); @@ -441,25 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; - if (mergeResult.simulate()) { - if (this.enabled != sourceMergeWith.enabled) { - mergeResult.addConflict("Cannot update enabled setting for [_source]"); - } - if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) { - mergeResult.addConflict("Cannot update includes setting for [_source]"); - } - if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { - mergeResult.addConflict("Cannot update excludes setting for [_source]"); - } - } else { - if (sourceMergeWith.compress != null) { - this.compress = sourceMergeWith.compress; - } - if (sourceMergeWith.compressThreshold != -1) { - this.compressThreshold = sourceMergeWith.compressThreshold; - } + List<String> conflicts = new ArrayList<>(); + if (this.enabled != sourceMergeWith.enabled) { + conflicts.add("Cannot update enabled setting for [_source]"); + } + if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) { + conflicts.add("Cannot update includes setting for [_source]"); + } + if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { + conflicts.add("Cannot update excludes setting for [_source]"); + } + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 9a18befe62..f99ca18600 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -32,7 +32,6 @@ import org.elasticsearch.index.analysis.NumericLongAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.SourceToParse; @@ -79,7 +78,7 @@ public class TTLFieldMapper extends MetadataFieldMapper { private long defaultTTL = Defaults.DEFAULT; public Builder() { - super(Defaults.NAME, Defaults.TTL_FIELD_TYPE); + super(Defaults.NAME, Defaults.TTL_FIELD_TYPE, Defaults.FIELD_TYPE); } public Builder enabled(EnabledAttributeMapper enabled) { @@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith; - if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with - if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) { - mergeResult.addConflict("_ttl cannot be disabled once it was enabled."); + if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with + if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == 
EnabledAttributeMapper.DISABLED) { + throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled."); } else { - if (!mergeResult.simulate()) { - this.enabledState = ttlMergeWith.enabledState; - } + this.enabledState = ttlMergeWith.enabledState; } } if (ttlMergeWith.defaultTTL != -1) { // we never build the default when the field is disabled so we should also not set it // (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster) - if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { + if (enabledState == EnabledAttributeMapper.ENABLED) { this.defaultTTL = ttlMergeWith.defaultTTL; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index 468243d63c..b0606f1994 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.LongFieldMapper; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -96,7 +96,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { private Boolean ignoreMissing = null; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); if (existing != null) { // if there is an existing type, always use that store value (only matters for < 2.0) explicitStore = true; @@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith; - super.merge(mergeWith, mergeResult); - if (!mergeResult.simulate()) { - if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = timestampFieldMapperMergeWith.enabledState; - } - } else { - if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) { - return; - } - if (defaultTimestamp == null) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); - } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); - } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); - } - if (this.path != null) { - if (path.equals(timestampFieldMapperMergeWith.path()) == false) { - mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? 
"missing" : timestampFieldMapperMergeWith.path())); - } - } else if (timestampFieldMapperMergeWith.path() != null) { - mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); + super.doMerge(mergeWith, updateAllTypes); + if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = timestampFieldMapperMergeWith.enabledState; + } + if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) { + return; + } + List<String> conflicts = new ArrayList<>(); + if (defaultTimestamp == null) { + conflicts.add("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { + conflicts.add("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); + } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { + conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + } + if (this.path != null) { + if (path.equals(timestampFieldMapperMergeWith.path()) == false) { + conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); } + } else if (timestampFieldMapperMergeWith.path() != null) { + conflicts.add("Cannot update path in _timestamp value. 
Value is " + path + " path in merged mapping is missing"); + } + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Conflicts: " + conflicts); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index d4acc3c597..c529db5183 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -81,7 +80,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder<Builder, TypeFieldMapper> { public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -225,7 +224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index ef4c48e62e..10f9880d97 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -79,7 +78,7 @@ public class UidFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder<Builder, UidFieldMapper> { public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 292a622ab7..6b1471afda 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -62,7 +61,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder<Builder, VersionFieldMapper> { public Builder() { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } @Override @@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // nothing to do } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index e57ceaf8ca..d8a7c752e6 100644 --- 
a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -122,8 +122,7 @@ public class IpFieldMapper extends NumberFieldMapper { setupFieldType(context); IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (IpFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index c51264f3db..58602f06df 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -125,13 +125,13 @@ public class DynamicTemplate { } public boolean match(ContentPath path, String name, String dynamicType) { - if (pathMatch != null && !patternMatch(pathMatch, path.fullPathAsText(name))) { + if (pathMatch != null && !patternMatch(pathMatch, path.pathAsText(name))) { return false; } if (match != null && !patternMatch(match, name)) { return false; } - if (pathUnmatch != null && patternMatch(pathUnmatch, path.fullPathAsText(name))) { + if (pathUnmatch != null && patternMatch(pathUnmatch, path.pathAsText(name))) { return false; } if (unmatch != null && patternMatch(unmatch, name)) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 88f8971905..c2d9783fc9 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Query; 
import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; @@ -40,7 +39,6 @@ import java.util.*; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.index.mapper.MapperBuilders.object; -import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; /** * @@ -54,7 +52,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, public static final boolean ENABLED = true; public static final Nested NESTED = Nested.NO; public static final Dynamic DYNAMIC = null; // not set, inherited from root - public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; } public static enum Dynamic { @@ -104,8 +101,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, protected Dynamic dynamic = Defaults.DYNAMIC; - protected ContentPath.Type pathType = Defaults.PATH_TYPE; - protected Boolean includeInAll; protected final List<Mapper.Builder> mappersBuilders = new ArrayList<>(); @@ -130,11 +125,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return builder; } - public T pathType(ContentPath.Type pathType) { - this.pathType = pathType; - return builder; - } - public T includeInAll(boolean includeInAll) { this.includeInAll = includeInAll; return builder; @@ -147,8 +137,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, @Override public Y build(BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(name); Map<String, Mapper> mappers = new HashMap<>(); @@ -156,17 +144,16 @@ public class ObjectMapper extends Mapper implements 
AllFieldMapper.IncludeInAll, Mapper mapper = builder.build(context); mappers.put(mapper.simpleName(), mapper); } - context.path().pathType(origPathType); context.path().remove(); - ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings()); - objectMapper.includeInAllIfNotSet(includeInAll); + ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic, mappers, context.indexSettings()); + objectMapper = objectMapper.includeInAllIfNotSet(includeInAll); return (Y) objectMapper; } - protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, @Nullable Settings settings) { - return new ObjectMapper(name, fullPath, enabled, nested, dynamic, pathType, mappers); + protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) { + return new ObjectMapper(name, fullPath, enabled, nested, dynamic, mappers); } } @@ -179,7 +166,7 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, Map.Entry<String, Object> entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder) || parseObjectProperties(name, fieldName, fieldNode, parserContext, builder)) { + if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder)) { iterator.remove(); } } @@ -214,14 +201,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return false; } - protected static boolean parseObjectProperties(String name, String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) { - if 
(fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.pathType(parsePathType(name, fieldNode.toString())); - return true; - } - return false; - } - protected static void parseNested(String name, Map<String, Object> node, ObjectMapper.Builder builder) { boolean nested = false; boolean nestedIncludeInParent = false; @@ -326,19 +305,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, private volatile Dynamic dynamic; - private final ContentPath.Type pathType; - private Boolean includeInAll; private volatile CopyOnWriteHashMap<String, Mapper> mappers; - ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers) { + ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers) { super(name); this.fullPath = fullPath; this.enabled = enabled; this.nested = nested; this.dynamic = dynamic; - this.pathType = pathType; if (mappers == null) { this.mappers = new CopyOnWriteHashMap<>(); } else { @@ -380,50 +356,58 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return this.enabled; } - public ContentPath.Type pathType() { - return pathType; - } - public Mapper getMapper(String field) { return mappers.get(field); } @Override - public void includeInAll(Boolean includeInAll) { + public ObjectMapper includeInAll(Boolean includeInAll) { if (includeInAll == null) { - return; + return this; } - this.includeInAll = includeInAll; + + ObjectMapper clone = clone(); + clone.includeInAll = includeInAll; // when called from outside, apply this on all the inner mappers - for (Mapper mapper : mappers.values()) { + for (Mapper mapper : clone.mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll); + 
clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll)); } } + return clone; } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { - if (this.includeInAll == null) { - this.includeInAll = includeInAll; + public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) { + if (includeInAll == null || this.includeInAll != null) { + return this; } + + ObjectMapper clone = clone(); + clone.includeInAll = includeInAll; // when called from outside, apply this on all the inner mappers - for (Mapper mapper : mappers.values()) { + for (Mapper mapper : clone.mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll)); } } + return clone; } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public ObjectMapper unsetIncludeInAll() { + if (includeInAll == null) { + return this; + } + ObjectMapper clone = clone(); + clone.includeInAll = null; // when called from outside, apply this on all the inner mappers for (Mapper mapper : mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll()); } } + return clone; } public Nested nested() { @@ -434,14 +418,9 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return this.nestedTypeFilter; } - /** - * Put a new mapper. - * NOTE: this method must be called under the current {@link DocumentMapper} - * lock if concurrent updates are expected. 
- */ - public void putMapper(Mapper mapper) { + protected void putMapper(Mapper mapper) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); + mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); } mappers = mappers.copyAndPut(mapper.simpleName(), mapper); } @@ -464,66 +443,45 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, } @Override - public void merge(final Mapper mergeWith, final MergeResult mergeResult) { + public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ObjectMapper)) { - mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); - return; + throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; + ObjectMapper merged = clone(); + merged.doMerge(mergeWithObject, updateAllTypes); + return merged; + } + protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { if (nested().isNested()) { - if (!mergeWithObject.nested().isNested()) { - mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); - return; + if (!mergeWith.nested().isNested()) { + throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested"); } } else { - if (mergeWithObject.nested().isNested()) { - mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); - return; + if (mergeWith.nested().isNested()) { + throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from non-nested to nested"); } } - if (!mergeResult.simulate()) { - if (mergeWithObject.dynamic != null) { - this.dynamic = mergeWithObject.dynamic; - } + 
if (mergeWith.dynamic != null) { + this.dynamic = mergeWith.dynamic; } - doMerge(mergeWithObject, mergeResult); - - List<Mapper> mappersToPut = new ArrayList<>(); - List<ObjectMapper> newObjectMappers = new ArrayList<>(); - List<FieldMapper> newFieldMappers = new ArrayList<>(); - for (Mapper mapper : mergeWithObject) { - Mapper mergeWithMapper = mapper; + for (Mapper mergeWithMapper : mergeWith) { Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); + Mapper merged; if (mergeIntoMapper == null) { - // no mapping, simply add it if not simulating - if (!mergeResult.simulate()) { - mappersToPut.add(mergeWithMapper); - MapperUtils.collect(mergeWithMapper, newObjectMappers, newFieldMappers); - } - } else if (mergeIntoMapper instanceof MetadataFieldMapper == false) { + // no mapping, simply add it + merged = mergeWithMapper; + } else { // root mappers can only exist here for backcompat, and are merged in Mapping - mergeIntoMapper.merge(mergeWithMapper, mergeResult); + merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes); } - } - if (!newFieldMappers.isEmpty()) { - mergeResult.addFieldMappers(newFieldMappers); - } - if (!newObjectMappers.isEmpty()) { - mergeResult.addObjectMappers(newObjectMappers); - } - // add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock) - for (Mapper mapper : mappersToPut) { - putMapper(mapper); + putMapper(merged); } } - protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { - - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { toXContent(builder, params, null); @@ -549,9 +507,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, if (enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (pathType != Defaults.PATH_TYPE) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (includeInAll != 
null) { builder.field("include_in_all", includeInAll); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index a0c989abd7..2fd4e91471 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -95,7 +95,7 @@ public class RootObjectMapper extends ObjectMapper { @Override - protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, @Nullable Settings settings) { + protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map<String, Mapper> mappers, @Nullable Settings settings) { assert !nested.isNested(); FormatDateTimeFormatter[] dates = null; if (dynamicDateTimeFormatters == null) { @@ -106,7 +106,7 @@ public class RootObjectMapper extends ObjectMapper { } else { dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]); } - return new RootObjectMapper(name, enabled, dynamic, pathType, mappers, + return new RootObjectMapper(name, enabled, dynamic, mappers, dates, dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]), dateDetection, numericDetection); @@ -196,15 +196,23 @@ public class RootObjectMapper extends ObjectMapper { private volatile DynamicTemplate dynamicTemplates[]; - RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map<String, Mapper> mappers, + RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map<String, Mapper> mappers, FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) { - super(name, name, enabled, Nested.NO, dynamic, pathType, mappers); 
+ super(name, name, enabled, Nested.NO, dynamic, mappers); this.dynamicTemplates = dynamicTemplates; this.dynamicDateTimeFormatters = dynamicDateTimeFormatters; this.dateDetection = dateDetection; this.numericDetection = numericDetection; } + /** Return a copy of this mapper that has the given {@code mapper} as a + * sub mapper. */ + public RootObjectMapper copyAndPutMapper(Mapper mapper) { + RootObjectMapper clone = (RootObjectMapper) clone(); + clone.putMapper(mapper); + return clone; + } + @Override public ObjectMapper mappingUpdate(Mapper mapper) { RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper); @@ -253,25 +261,29 @@ public class RootObjectMapper extends ObjectMapper { } @Override - protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { + public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { + return (RootObjectMapper) super.merge(mergeWith, updateAllTypes); + } + + @Override + protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; - if (!mergeResult.simulate()) { - // merge them - List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); - for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { - boolean replaced = false; - for (int i = 0; i < mergedTemplates.size(); i++) { - if (mergedTemplates.get(i).name().equals(template.name())) { - mergedTemplates.set(i, template); - replaced = true; - } - } - if (!replaced) { - mergedTemplates.add(template); + // merge them + List<DynamicTemplate> mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); + for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { + boolean replaced = false; + for (int i = 0; i < mergedTemplates.size(); i++) { + if (mergedTemplates.get(i).name().equals(template.name())) { + mergedTemplates.set(i, template); + replaced = true; 
} } - this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); + if (!replaced) { + mergedTemplates.add(template); + } } + this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index 5aad36cd27..454465727b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -31,19 +31,16 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.common.geo.builders.PointBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; import org.elasticsearch.search.internal.SearchContext; @@ -61,13 +58,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil public static final String DEFAULT_SHAPE_FIELD_NAME = "shape"; public static final ShapeRelation 
DEFAULT_SHAPE_RELATION = ShapeRelation.INTERSECTS; - static final GeoShapeQueryBuilder PROTOTYPE = new GeoShapeQueryBuilder("field", new BytesArray(new byte[1])); + static final GeoShapeQueryBuilder PROTOTYPE = new GeoShapeQueryBuilder("field", new PointBuilder()); private final String fieldName; - // TODO make the ShapeBuilder and subclasses Writable and implement hashCode - // and Equals so ShapeBuilder can be used here - private BytesReference shapeBytes; + private ShapeBuilder shape; private SpatialStrategy strategy; @@ -88,7 +83,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil * @param shape * Shape used in the Query */ - public GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape) throws IOException { + public GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape) { this(fieldName, shape, null, null); } @@ -105,37 +100,21 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil * Index type of the indexed Shapes */ public GeoShapeQueryBuilder(String fieldName, String indexedShapeId, String indexedShapeType) { - this(fieldName, (BytesReference) null, indexedShapeId, indexedShapeType); + this(fieldName, (ShapeBuilder) null, indexedShapeId, indexedShapeType); } - GeoShapeQueryBuilder(String fieldName, BytesReference shapeBytes) { - this(fieldName, shapeBytes, null, null); - } - - private GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape, String indexedShapeId, String indexedShapeType) throws IOException { - this(fieldName, new BytesArray(new byte[1]), indexedShapeId, indexedShapeType); - if (shape != null) { - this.shapeBytes = shape.buildAsBytes(XContentType.JSON); - if (this.shapeBytes.length() == 0) { - throw new IllegalArgumentException("shape must not be empty"); - } - } else { - throw new IllegalArgumentException("shape must not be null"); - } - } - - private GeoShapeQueryBuilder(String fieldName, BytesReference shapeBytes, String indexedShapeId, String indexedShapeType) { + 
private GeoShapeQueryBuilder(String fieldName, ShapeBuilder shape, String indexedShapeId, String indexedShapeType) { if (fieldName == null) { throw new IllegalArgumentException("fieldName is required"); } - if ((shapeBytes == null || shapeBytes.length() == 0) && indexedShapeId == null) { + if (shape == null && indexedShapeId == null) { throw new IllegalArgumentException("either shapeBytes or indexedShapeId and indexedShapeType are required"); } if (indexedShapeId != null && indexedShapeType == null) { throw new IllegalArgumentException("indexedShapeType is required if indexedShapeId is specified"); } this.fieldName = fieldName; - this.shapeBytes = shapeBytes; + this.shape = shape; this.indexedShapeId = indexedShapeId; this.indexedShapeType = indexedShapeType; } @@ -148,10 +127,10 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil } /** - * @return the JSON bytes for the shape used in the Query + * @return the shape used in the Query */ - public BytesReference shapeBytes() { - return shapeBytes; + public ShapeBuilder shape() { + return shape; } /** @@ -258,15 +237,11 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil @Override protected Query doToQuery(QueryShardContext context) throws IOException { - ShapeBuilder shape; - if (shapeBytes == null) { + ShapeBuilder shapeToQuery = shape; + if (shapeToQuery == null) { GetRequest getRequest = new GetRequest(indexedShapeIndex, indexedShapeType, indexedShapeId); getRequest.copyContextAndHeadersFrom(SearchContext.current()); - shape = fetch(context.getClient(), getRequest, indexedShapePath); - } else { - XContentParser shapeParser = XContentHelper.createParser(shapeBytes); - shapeParser.nextToken(); - shape = ShapeBuilder.parse(shapeParser); + shapeToQuery = fetch(context.getClient(), getRequest, indexedShapePath); } MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { @@ -291,12 +266,12 @@ public class GeoShapeQueryBuilder 
extends AbstractQueryBuilder<GeoShapeQueryBuil // in this case, execute disjoint as exists && !intersects BooleanQuery.Builder bool = new BooleanQuery.Builder(); Query exists = ExistsQueryBuilder.newFilter(context, fieldName); - Query intersects = strategy.makeQuery(getArgs(shape, ShapeRelation.INTERSECTS)); + Query intersects = strategy.makeQuery(getArgs(shapeToQuery, ShapeRelation.INTERSECTS)); bool.add(exists, BooleanClause.Occur.MUST); bool.add(intersects, BooleanClause.Occur.MUST_NOT); query = new ConstantScoreQuery(bool.build()); } else { - query = new ConstantScoreQuery(strategy.makeQuery(getArgs(shape, relation))); + query = new ConstantScoreQuery(strategy.makeQuery(getArgs(shapeToQuery, relation))); } return query; } @@ -378,11 +353,9 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil builder.field(GeoShapeQueryParser.STRATEGY_FIELD.getPreferredName(), strategy.getStrategyName()); } - if (shapeBytes != null) { + if (shape != null) { builder.field(GeoShapeQueryParser.SHAPE_FIELD.getPreferredName()); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(shapeBytes); - parser.nextToken(); - builder.copyCurrentStructure(parser); + shape.toXContent(builder, params); } else { builder.startObject(GeoShapeQueryParser.INDEXED_SHAPE_FIELD.getPreferredName()) .field(GeoShapeQueryParser.SHAPE_ID_FIELD.getPreferredName(), indexedShapeId) @@ -412,8 +385,7 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil String fieldName = in.readString(); GeoShapeQueryBuilder builder; if (in.readBoolean()) { - BytesReference shapeBytes = in.readBytesReference(); - builder = new GeoShapeQueryBuilder(fieldName, shapeBytes); + builder = new GeoShapeQueryBuilder(fieldName, in.readShape()); } else { String indexedShapeId = in.readOptionalString(); String indexedShapeType = in.readOptionalString(); @@ -437,10 +409,10 @@ public class GeoShapeQueryBuilder extends 
AbstractQueryBuilder<GeoShapeQueryBuil @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); - boolean hasShapeBytes = shapeBytes != null; - out.writeBoolean(hasShapeBytes); - if (hasShapeBytes) { - out.writeBytesReference(shapeBytes); + boolean hasShape = shape != null; + out.writeBoolean(hasShape); + if (hasShape) { + out.writeShape(shape); } else { out.writeOptionalString(indexedShapeId); out.writeOptionalString(indexedShapeType); @@ -464,14 +436,14 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil && Objects.equals(indexedShapePath, other.indexedShapePath) && Objects.equals(indexedShapeType, other.indexedShapeType) && Objects.equals(relation, other.relation) - && Objects.equals(shapeBytes, other.shapeBytes) + && Objects.equals(shape, other.shape) && Objects.equals(strategy, other.strategy); } @Override protected int doHashCode() { return Objects.hash(fieldName, indexedShapeId, indexedShapeIndex, - indexedShapePath, indexedShapeType, relation, shapeBytes, strategy); + indexedShapePath, indexedShapeType, relation, shape, strategy); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java index 12add77a88..c1d040f33b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java @@ -22,11 +22,9 @@ package org.elasticsearch.index.query; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.SpatialStrategy; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; +import 
org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -54,7 +52,7 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> { String fieldName = null; ShapeRelation shapeRelation = null; SpatialStrategy strategy = null; - BytesReference shape = null; + ShapeBuilder shape = null; String id = null; String type = null; @@ -79,8 +77,7 @@ public class GeoShapeQueryParser implements QueryParser<GeoShapeQueryBuilder> { currentFieldName = parser.currentName(); token = parser.nextToken(); if (parseContext.parseFieldMatcher().match(currentFieldName, SHAPE_FIELD)) { - XContentBuilder builder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - shape = builder.bytes(); + shape = ShapeBuilder.parse(parser); } else if (parseContext.parseFieldMatcher().match(currentFieldName, STRATEGY_FIELD)) { String strategyName = parser.text(); strategy = SpatialStrategy.fromString(strategyName); diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java deleted file mode 100644 index 70d0bb9350..0000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermRangeQuery; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.io.IOException; -import java.util.Collection; -import java.util.Objects; - -/** - * Constructs a filter that have only null values or no value in the original field. - */ -public class MissingQueryBuilder extends AbstractQueryBuilder<MissingQueryBuilder> { - - public static final String NAME = "missing"; - - public static final boolean DEFAULT_NULL_VALUE = false; - - public static final boolean DEFAULT_EXISTENCE_VALUE = true; - - private final String fieldPattern; - - private final boolean nullValue; - - private final boolean existence; - - static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - - /** - * Constructs a filter that returns documents with only null values or no value in the original field. 
- * @param fieldPattern the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to <tt>false</tt>. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to <tt>true</tt>. - * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false - */ - public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) { - if (Strings.isEmpty(fieldPattern)) { - throw new IllegalArgumentException("missing query must be provided with a [field]"); - } - if (nullValue == false && existence == false) { - throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true"); - } - this.fieldPattern = fieldPattern; - this.nullValue = nullValue; - this.existence = existence; - } - - public MissingQueryBuilder(String fieldPattern) { - this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - } - - public String fieldPattern() { - return this.fieldPattern; - } - - /** - * Returns true if the missing filter will include documents where the field contains a null value, otherwise - * these documents will not be included. - */ - public boolean nullValue() { - return this.nullValue; - } - - /** - * Returns true if the missing filter will include documents where the field has no values, otherwise - * these documents will not be included. 
- */ - public boolean existence() { - return this.existence; - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern); - builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue); - builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence); - printBoostAndQueryName(builder); - builder.endObject(); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - return newFilter(context, fieldPattern, existence, nullValue); - } - - public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) { - if (!existence && !nullValue) { - throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true"); - } - - final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME); - if (fieldNamesFieldType == null) { - // can only happen when no types exist, so no docs exist either - return Queries.newMatchNoDocsQuery(); - } - - ObjectMapper objectMapper = context.getObjectMapper(fieldPattern); - if (objectMapper != null) { - // automatic make the object mapper pattern - fieldPattern = fieldPattern + ".*"; - } - - Collection<String> fields = context.simpleMatchToIndexNames(fieldPattern); - if (fields.isEmpty()) { - if (existence) { - // if we ask for existence of fields, and we found none, then we should match on all - return Queries.newMatchAllQuery(); - } - return null; - } - - Query existenceFilter = null; - Query nullFilter = null; - - if (existence) { - BooleanQuery.Builder boolFilter = new BooleanQuery.Builder(); - for (String field : fields) { - MappedFieldType 
fieldType = context.fieldMapper(field); - Query filter = null; - if (fieldNamesFieldType.isEnabled()) { - final String f; - if (fieldType != null) { - f = fieldType.names().indexName(); - } else { - f = field; - } - filter = fieldNamesFieldType.termQuery(f, context); - } - // if _field_names are not indexed, we need to go the slow way - if (filter == null && fieldType != null) { - filter = fieldType.rangeQuery(null, null, true, true); - } - if (filter == null) { - filter = new TermRangeQuery(field, null, null, true, true); - } - boolFilter.add(filter, BooleanClause.Occur.SHOULD); - } - - existenceFilter = boolFilter.build(); - existenceFilter = Queries.not(existenceFilter);; - } - - if (nullValue) { - for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - if (fieldType != null) { - nullFilter = fieldType.nullValueQuery(); - } - } - } - - Query filter; - if (nullFilter != null) { - if (existenceFilter != null) { - filter = new BooleanQuery.Builder() - .add(existenceFilter, BooleanClause.Occur.SHOULD) - .add(nullFilter, BooleanClause.Occur.SHOULD) - .build(); - } else { - filter = nullFilter; - } - } else { - filter = existenceFilter; - } - - if (filter == null) { - return null; - } - - return new ConstantScoreQuery(filter); - } - - @Override - protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException { - return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean()); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(fieldPattern); - out.writeBoolean(nullValue); - out.writeBoolean(existence); - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldPattern, nullValue, existence); - } - - @Override - protected boolean doEquals(MissingQueryBuilder other) { - return Objects.equals(fieldPattern, other.fieldPattern) && - Objects.equals(nullValue, other.nullValue) && - Objects.equals(existence, other.existence); - } -} diff --git 
a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java deleted file mode 100644 index 467971b65c..0000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -/** - * Parser for missing query - */ -public class MissingQueryParser implements QueryParser<MissingQueryBuilder> { - - public static final ParseField FIELD_FIELD = new ParseField("field"); - public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value"); - public static final ParseField EXISTENCE_FIELD = new ParseField("existence"); - - @Override - public String[] names() { - return new String[]{MissingQueryBuilder.NAME}; - } - - @Override - public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - - String fieldPattern = null; - String queryName = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE; - boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE; - - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) { - fieldPattern = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) { - nullValue = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) { - existence = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else { 
- throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); - } - } - - if (fieldPattern == null) { - throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]"); - } - return new MissingQueryBuilder(fieldPattern, nullValue, existence) - .boost(boost) - .queryName(queryName); - } - - @Override - public MissingQueryBuilder getBuilderPrototype() { - return MissingQueryBuilder.PROTOTYPE; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 45f97d68c3..3fb0967920 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -810,27 +810,6 @@ public abstract class QueryBuilders { return new ExistsQueryBuilder(name); } - /** - * A filter to filter only documents where a field does not exists in them. - * @param name the field to query - */ - public static MissingQueryBuilder missingQuery(String name) { - return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE); - } - - /** - * A filter to filter only documents where a field does not exists in them. - * @param name the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to <tt>false</tt>. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to <tt>true</tt>. 
- * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false - */ - public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) { - return new MissingQueryBuilder(name, nullValue, existence); - } - private QueryBuilders() { } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 65dfb559e3..faf482ead9 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -63,6 +63,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -364,7 +365,7 @@ public class QueryShardContext { * Executes the given template, and returns the response. 
*/ public BytesReference executeQueryTemplate(Template template, SearchContext searchContext) { - ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext); + ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext, Collections.emptyMap()); return (BytesReference) executable.run(); } diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index f69ac8c054..6f14f15d3f 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; +import java.util.Collections; import java.util.Objects; public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder> { @@ -80,7 +81,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder> public ScriptQuery(Script script, ScriptService scriptService, SearchLookup searchLookup) { this.script = script; - this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH); + this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); } @Override @@ -161,4 +162,4 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder> protected boolean doEquals(ScriptQueryBuilder other) { return Objects.equals(script, other.script); } -}
\ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java index c528c0007f..e7ce9b90e2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java @@ -19,13 +19,11 @@ package org.elasticsearch.index.query.functionscore; -import java.util.Map; - import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser; import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser; import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser; @@ -74,11 +72,12 @@ public class ScoreFunctionParserMapper { return functionParsers.get(parserName); } - private static void addParser(ScoreFunctionParser<?> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) { + private static void addParser(ScoreFunctionParser<? extends ScoreFunctionBuilder> scoreFunctionParser, Map<String, ScoreFunctionParser<?>> map, NamedWriteableRegistry namedWriteableRegistry) { for (String name : scoreFunctionParser.getNames()) { map.put(name, scoreFunctionParser); } - namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, scoreFunctionParser.getBuilderPrototype()); + @SuppressWarnings("unchecked") NamedWriteable<? 
extends ScoreFunctionBuilder> sfb = scoreFunctionParser.getBuilderPrototype(); + namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb); } } diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java index 9230846631..5fcd70b65d 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import java.io.IOException; +import java.util.Collections; import java.util.Objects; /** @@ -89,10 +90,10 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder<ScriptScore @Override protected ScoreFunction doToFunction(QueryShardContext context) { try { - SearchScript searchScript = context.getScriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.getScriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); return new ScriptScoreFunction(script, searchScript); } catch (Exception e) { throw new QueryShardException(context, "script_score: the script could not be loaded", e); } } -}
\ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index c0bf924467..a67ca309bc 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -19,7 +19,11 @@ package org.elasticsearch.index.shard; -import org.apache.lucene.index.*; +import org.apache.lucene.index.CheckIndex; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; +import org.apache.lucene.index.SnapshotDeletionPolicy; +import org.apache.lucene.index.Term; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.store.AlreadyClosedException; @@ -61,7 +65,16 @@ import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.engine.*; +import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineClosedException; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.InternalEngineFactory; +import org.elasticsearch.index.engine.RefreshFailedEngineException; +import org.elasticsearch.index.engine.Segment; +import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ShardFieldData; @@ -70,7 +83,12 @@ import org.elasticsearch.index.get.GetStats; import 
org.elasticsearch.index.get.ShardGetService; import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.DocumentMapperForType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.percolator.PercolateStats; import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; @@ -108,7 +126,12 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.channels.ClosedByInterruptException; import java.nio.charset.StandardCharsets; -import java.util.*; +import java.util.Arrays; +import java.util.EnumSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; @@ -151,7 +174,6 @@ public class IndexShard extends AbstractIndexShardComponent { private TimeValue refreshInterval; private volatile ScheduledFuture<?> refreshScheduledFuture; - private volatile ScheduledFuture<?> mergeScheduleFuture; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>(); @@ -766,8 +788,6 @@ public class IndexShard extends AbstractIndexShardComponent { if (state != IndexShardState.CLOSED) { FutureUtils.cancel(refreshScheduledFuture); refreshScheduledFuture = null; - FutureUtils.cancel(mergeScheduleFuture); - mergeScheduleFuture = null; } changeState(IndexShardState.CLOSED, reason); indexShardOperationCounter.decRef(); @@ -1099,7 +1119,8 @@ public class IndexShard 
extends AbstractIndexShardComponent { // we are the first primary, recover from the gateway // if its post api allocation, the index should exists assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; - final boolean shouldExist = shardRouting.allocatedPostIndexCreate(); + boolean shouldExist = shardRouting.allocatedPostIndexCreate(idxSettings.getIndexMetaData()); + StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); return storeRecovery.recoverFromStore(this, shouldExist, localNode); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index ac46f6725d..88e55600bc 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -110,7 +110,7 @@ public class TranslogRecoveryPerformer { if (currentUpdate == null) { recoveredTypes.put(type, update); } else { - MapperUtils.merge(currentUpdate, update); + currentUpdate = currentUpdate.merge(update, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 1bd023abdb..ed56187673 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -21,37 +21,36 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.node.settings.NodeSettingsService; /** 
* IndexStoreConfig encapsulates node / cluster level configuration for index level {@link IndexStore} instances. * For instance it maintains the node level rate limiter configuration: updates to the cluster that disable or enable - * {@value #INDICES_STORE_THROTTLE_TYPE} or {@value #INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC} are reflected immediately + * <tt>indices.store.throttle.type</tt> or <tt>indices.store.throttle.max_bytes_per_sec</tt> are reflected immediately * on all referencing {@link IndexStore} instances */ -public class IndexStoreConfig implements NodeSettingsService.Listener { +public class IndexStoreConfig{ /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. */ - public static final String INDICES_STORE_THROTTLE_TYPE = "indices.store.throttle.type"; + public static final Setting<StoreRateLimiting.Type> INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER); /** * Configures the node / cluster level throttle intensity. 
The default is <tt>10240 MB</tt> */ - public static final String INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC = "indices.store.throttle.max_bytes_per_sec"; - private volatile String rateLimitingType; + public static final Setting<ByteSizeValue> INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER); + private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); private final ESLogger logger; public IndexStoreConfig(Settings settings) { logger = Loggers.getLogger(IndexStoreConfig.class, settings); // we don't limit by default (we default to CMS's auto throttle instead): - this.rateLimitingType = settings.get("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name()); + this.rateLimitingType = INDICES_STORE_THROTTLE_TYPE_SETTING.get(settings); rateLimiting.setType(rateLimitingType); - this.rateLimitingThrottle = settings.getAsBytesSize("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0)); + this.rateLimitingThrottle = INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.get(settings); rateLimiting.setMaxRate(rateLimitingThrottle); logger.debug("using indices.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); } @@ -63,22 +62,12 @@ public class IndexStoreConfig implements NodeSettingsService.Listener { return rateLimiting; } - @Override - public void onRefreshSettings(Settings settings) { - String rateLimitingType = settings.get(INDICES_STORE_THROTTLE_TYPE, this.rateLimitingType); - // try and parse the type - StoreRateLimiting.Type.fromString(rateLimitingType); - if (!rateLimitingType.equals(this.rateLimitingType)) { - logger.info("updating indices.store.throttle.type from [{}] to [{}]", this.rateLimitingType, rateLimitingType); - this.rateLimitingType = 
rateLimitingType; - this.rateLimiting.setType(rateLimitingType); - } + public void setRateLimitingType(StoreRateLimiting.Type rateLimitingType) { + this.rateLimitingType = rateLimitingType; + rateLimiting.setType(rateLimitingType); + } - ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, this.rateLimitingThrottle); - if (!rateLimitingThrottle.equals(this.rateLimitingThrottle)) { - logger.info("updating indices.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", this.rateLimitingThrottle, rateLimitingThrottle, this.rateLimitingType); - this.rateLimitingThrottle = rateLimitingThrottle; - this.rateLimiting.setMaxRate(rateLimitingThrottle); - } + public void setRateLimitingThrottle(ByteSizeValue rateLimitingThrottle) { + this.rateLimitingThrottle = rateLimitingThrottle; } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java index 6026468973..a2eb0bff64 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java @@ -48,22 +48,27 @@ public final class BufferingTranslogWriter extends TranslogWriter { public Translog.Location add(BytesReference data) throws IOException { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); - operationCounter++; final long offset = totalOffset; if (data.length() >= buffer.length) { flush(); // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel - data.writeTo(channel); + try { + data.writeTo(channel); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } writtenOffset += data.length(); totalOffset += data.length(); - return new Translog.Location(generation, offset, data.length()); - } - if (data.length() > buffer.length - 
bufferCount) { - flush(); + } else { + if (data.length() > buffer.length - bufferCount) { + flush(); + } + data.writeTo(bufferOs); + totalOffset += data.length(); } - data.writeTo(bufferOs); - totalOffset += data.length(); + operationCounter++; return new Translog.Location(generation, offset, data.length()); } } @@ -71,10 +76,17 @@ public final class BufferingTranslogWriter extends TranslogWriter { protected final void flush() throws IOException { assert writeLock.isHeldByCurrentThread(); if (bufferCount > 0) { + ensureOpen(); // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel - Channels.writeToChannel(buffer, 0, bufferCount, channel); - writtenOffset += bufferCount; + final int bufferSize = bufferCount; + try { + Channels.writeToChannel(buffer, 0, bufferSize, channel); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } + writtenOffset += bufferSize; bufferCount = 0; } } @@ -102,20 +114,28 @@ public final class BufferingTranslogWriter extends TranslogWriter { } @Override - public void sync() throws IOException { - if (!syncNeeded()) { - return; - } - synchronized (this) { + public synchronized void sync() throws IOException { + if (syncNeeded()) { + ensureOpen(); // this call gives a better exception that the incRef if we are closed by a tragic event channelReference.incRef(); try { + final long offsetToSync; + final int opsCounter; try (ReleasableLock lock = writeLock.acquire()) { flush(); - lastSyncedOffset = totalOffset; + offsetToSync = totalOffset; + opsCounter = operationCounter; } // we can do this outside of the write lock but we have to protect from // concurrent syncs - checkpoint(lastSyncedOffset, operationCounter, channelReference); + ensureOpen(); // just for kicks - the checkpoint happens or not either way + try { + checkpoint(offsetToSync, opsCounter, channelReference); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } + 
lastSyncedOffset = offsetToSync; } finally { channelReference.decRef(); } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 35dd895bc2..fd5c64f96a 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -115,7 +115,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final Path location; private TranslogWriter current; private volatile ImmutableTranslogReader currentCommittingTranslog; - private long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion. + private volatile long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion. private final AtomicBoolean closed = new AtomicBoolean(); private final TranslogConfig config; private final String translogUUID; @@ -158,7 +158,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC try { if (translogGeneration != null) { - final Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); + final Checkpoint checkpoint = readCheckpoint(); this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint); if (recoveredTranslogs.isEmpty()) { throw new IllegalStateException("at least one reader must be recovered"); @@ -279,7 +279,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } } - boolean isOpen() { + /** Returns {@code true} if this {@code Translog} is still open. 
*/ + public boolean isOpen() { return closed.get() == false; } @@ -288,10 +289,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC if (closed.compareAndSet(false, true)) { try (ReleasableLock lock = writeLock.acquire()) { try { - IOUtils.close(current, currentCommittingTranslog); + current.sync(); } finally { - IOUtils.close(recoveredTranslogs); - recoveredTranslogs.clear(); + try { + IOUtils.close(current, currentCommittingTranslog); + } finally { + IOUtils.close(recoveredTranslogs); + recoveredTranslogs.clear(); + } } } finally { FutureUtils.cancel(syncScheduler); @@ -354,7 +359,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC TranslogWriter createWriter(long fileGeneration) throws IOException { TranslogWriter newFile; try { - newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize()); + newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize(), getChannelFactory()); } catch (IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } @@ -393,7 +398,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * @see Index * @see org.elasticsearch.index.translog.Translog.Delete */ - public Location add(Operation operation) throws TranslogException { + public Location add(Operation operation) throws IOException { final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays); try { final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out); @@ -415,9 +420,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC assert current.assertBytesAtLocation(location, bytes); return location; } - } catch 
(AlreadyClosedException ex) { + } catch (AlreadyClosedException | IOException ex) { + closeOnTragicEvent(ex); throw ex; } catch (Throwable e) { + closeOnTragicEvent(e); throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e); } finally { Releasables.close(out.bytes()); @@ -429,6 +436,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * Snapshots are fixed in time and will not be updated with future operations. */ public Snapshot newSnapshot() { + ensureOpen(); try (ReleasableLock lock = readLock.acquire()) { ArrayList<TranslogReader> toOpen = new ArrayList<>(); toOpen.addAll(recoveredTranslogs); @@ -493,6 +501,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC if (closed.get() == false) { current.sync(); } + } catch (Throwable ex) { + closeOnTragicEvent(ex); + throw ex; } } @@ -520,12 +531,26 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC public boolean ensureSynced(Location location) throws IOException { try (ReleasableLock lock = readLock.acquire()) { if (location.generation == current.generation) { // if we have a new one it's already synced + ensureOpen(); return current.syncUpTo(location.translogLocation + location.size); } + } catch (Throwable ex) { + closeOnTragicEvent(ex); + throw ex; } return false; } + private void closeOnTragicEvent(Throwable ex) { + if (current.getTragicException() != null) { + try { + close(); + } catch (Exception inner) { + ex.addSuppressed(inner); + } + } + } + /** * return stats */ @@ -548,31 +573,29 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final class OnCloseRunnable implements Callback<ChannelReference> { @Override public void handle(ChannelReference channelReference) { - try (ReleasableLock lock = writeLock.acquire()) { - if (isReferencedGeneration(channelReference.getGeneration()) == false) { - Path translogPath = channelReference.getPath(); 
- assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath; - // if the given translogPath is not the current we can safely delete the file since all references are released - logger.trace("delete translog file - not referenced and not current anymore {}", translogPath); - IOUtils.deleteFilesIgnoringExceptions(translogPath); - IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); + if (isReferencedGeneration(channelReference.getGeneration()) == false) { + Path translogPath = channelReference.getPath(); + assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath; + // if the given translogPath is not the current we can safely delete the file since all references are released + logger.trace("delete translog file - not referenced and not current anymore {}", translogPath); + IOUtils.deleteFilesIgnoringExceptions(translogPath); + IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); - } - try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) { - for (Path path : stream) { - Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString()); - if (matcher.matches()) { - long generation = Long.parseLong(matcher.group(1)); - if (isReferencedGeneration(generation) == false) { - logger.trace("delete translog file - not referenced and not current anymore {}", path); - IOUtils.deleteFilesIgnoringExceptions(path); - IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); - } + } + try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) { + for (Path path : stream) { + Matcher matcher = 
PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString()); + if (matcher.matches()) { + long generation = Long.parseLong(matcher.group(1)); + if (isReferencedGeneration(generation) == false) { + logger.trace("delete translog file - not referenced and not current anymore {}", path); + IOUtils.deleteFilesIgnoringExceptions(path); + IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration()))); } } - } catch (IOException e) { - logger.warn("failed to delete unreferenced translog files", e); } + } catch (IOException e) { + logger.warn("failed to delete unreferenced translog files", e); } } } @@ -1294,6 +1317,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC throw new IllegalStateException("already committing a translog with generation: " + currentCommittingTranslog.getGeneration()); } final TranslogWriter oldCurrent = current; + oldCurrent.ensureOpen(); oldCurrent.sync(); currentCommittingTranslog = current.immutableReader(); Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME); @@ -1389,7 +1413,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private void ensureOpen() { if (closed.get()) { - throw new AlreadyClosedException("translog is already closed"); + throw new AlreadyClosedException("translog is already closed", current.getTragicException()); } } @@ -1400,4 +1424,20 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return outstandingViews.size(); } + TranslogWriter.ChannelFactory getChannelFactory() { + return TranslogWriter.ChannelFactory.DEFAULT; + } + + /** If this {@code Translog} was closed as a side-effect of a tragic exception, + * e.g. disk full while flushing a new segment, this returns the root cause exception. + * Otherwise (no tragic exception has occurred) it returns null. 
*/ + public Throwable getTragicException() { + return current.getTragicException(); + } + + /** Reads and returns the current checkpoint */ + final Checkpoint readCheckpoint() throws IOException { + return Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME)); + } + } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index 590bc31905..d7077fd90a 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -140,16 +140,16 @@ public abstract class TranslogReader implements Closeable, Comparable<TranslogRe @Override public void close() throws IOException { if (closed.compareAndSet(false, true)) { - doClose(); + channelReference.decRef(); } } - protected void doClose() throws IOException { - channelReference.decRef(); + protected final boolean isClosed() { + return closed.get(); } protected void ensureOpen() { - if (closed.get()) { + if (isClosed()) { throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed"); } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 045550cb62..9870bddf87 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.translog; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.OutputStreamDataOutput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; @@ -54,6 +55,9 @@ public class TranslogWriter extends TranslogReader { protected volatile int operationCounter; /* the offset in bytes written to the file */ protected volatile long 
writtenOffset; + /* if we hit an exception that we can't recover from we assign it to this var and ship it with every AlreadyClosedException we throw */ + private volatile Throwable tragedy; + public TranslogWriter(ShardId shardId, long generation, ChannelReference channelReference) throws IOException { super(generation, channelReference, channelReference.getChannel().position()); @@ -65,10 +69,10 @@ public class TranslogWriter extends TranslogReader { this.lastSyncedOffset = channelReference.getChannel().position();; } - public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize) throws IOException { + public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize, ChannelFactory channelFactory) throws IOException { final BytesRef ref = new BytesRef(translogUUID); final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT; - final FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); + final FileChannel channel = channelFactory.open(file); try { // This OutputStreamDataOutput is intentionally not closed because // closing it will close the FileChannel @@ -90,6 +94,12 @@ public class TranslogWriter extends TranslogReader { throw throwable; } } + /** If this {@code TranslogWriter} was closed as a side-effect of a tragic exception, + * e.g. disk full while flushing a new segment, this returns the root cause exception. + * Otherwise (no tragic exception has occurred) it returns null. 
*/ + public Throwable getTragicException() { + return tragedy; + } public enum Type { @@ -118,6 +128,16 @@ public class TranslogWriter extends TranslogReader { } } + protected final void closeWithTragicEvent(Throwable throwable) throws IOException { + try (ReleasableLock lock = writeLock.acquire()) { + if (tragedy == null) { + tragedy = throwable; + } else { + tragedy.addSuppressed(throwable); + } + close(); + } + } /** * add the given bytes to the translog and return the location they were written at @@ -127,9 +147,14 @@ public class TranslogWriter extends TranslogReader { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); position = writtenOffset; - data.writeTo(channel); + try { + data.writeTo(channel); + } catch (Throwable e) { + closeWithTragicEvent(e); + throw e; + } writtenOffset = writtenOffset + data.length(); - operationCounter = operationCounter + 1; + operationCounter++;; } return new Translog.Location(generation, position, data.length()); } @@ -143,12 +168,13 @@ public class TranslogWriter extends TranslogReader { /** * write all buffered ops to disk and fsync file */ - public void sync() throws IOException { + public synchronized void sync() throws IOException { // synchronized to ensure only one sync happens a time // check if we really need to sync here... 
if (syncNeeded()) { try (ReleasableLock lock = writeLock.acquire()) { + ensureOpen(); + checkpoint(writtenOffset, operationCounter, channelReference); lastSyncedOffset = writtenOffset; - checkpoint(lastSyncedOffset, operationCounter, channelReference); } } } @@ -263,15 +289,6 @@ public class TranslogWriter extends TranslogReader { } @Override - protected final void doClose() throws IOException { - try (ReleasableLock lock = writeLock.acquire()) { - sync(); - } finally { - super.doClose(); - } - } - - @Override protected void readBytes(ByteBuffer buffer, long position) throws IOException { try (ReleasableLock lock = readLock.acquire()) { Channels.readFromFileChannelWithEofException(channel, position, buffer); @@ -288,4 +305,20 @@ public class TranslogWriter extends TranslogReader { Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation); Checkpoint.write(checkpointFile, checkpoint, options); } + + static class ChannelFactory { + + static final ChannelFactory DEFAULT = new ChannelFactory(); + + // only for testing until we have a disk-full FileSystemt + public FileChannel open(Path file) throws IOException { + return FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); + } + } + + protected final void ensureOpen() { + if (isClosed()) { + throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy); + } + } } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index cdd7f05033..61210bb041 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,18 +22,86 @@ package org.elasticsearch.indices; import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.common.geo.ShapesAvailability; 
+import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.util.ExtensionPoint; import org.elasticsearch.index.NodeServicesProvider; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.core.*; +import org.elasticsearch.index.mapper.core.BinaryFieldMapper; +import org.elasticsearch.index.mapper.core.BooleanFieldMapper; +import org.elasticsearch.index.mapper.core.ByteFieldMapper; +import org.elasticsearch.index.mapper.core.CompletionFieldMapper; +import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.index.mapper.core.DoubleFieldMapper; +import org.elasticsearch.index.mapper.core.FloatFieldMapper; +import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.LongFieldMapper; +import org.elasticsearch.index.mapper.core.ShortFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; +import org.elasticsearch.index.mapper.core.TypeParsers; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.internal.*; +import org.elasticsearch.index.mapper.internal.AllFieldMapper; +import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.internal.IdFieldMapper; +import org.elasticsearch.index.mapper.internal.IndexFieldMapper; +import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TTLFieldMapper; +import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; +import 
org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.ip.IpFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.query.*; +import org.elasticsearch.index.query.BoolQueryParser; +import org.elasticsearch.index.query.BoostingQueryParser; +import org.elasticsearch.index.query.CommonTermsQueryParser; +import org.elasticsearch.index.query.ConstantScoreQueryParser; +import org.elasticsearch.index.query.DisMaxQueryParser; +import org.elasticsearch.index.query.ExistsQueryParser; +import org.elasticsearch.index.query.FieldMaskingSpanQueryParser; +import org.elasticsearch.index.query.FuzzyQueryParser; +import org.elasticsearch.index.query.GeoBoundingBoxQueryParser; +import org.elasticsearch.index.query.GeoDistanceQueryParser; +import org.elasticsearch.index.query.GeoDistanceRangeQueryParser; +import org.elasticsearch.index.query.GeoPolygonQueryParser; +import org.elasticsearch.index.query.GeoShapeQueryParser; +import org.elasticsearch.index.query.GeohashCellQuery; +import org.elasticsearch.index.query.HasChildQueryParser; +import org.elasticsearch.index.query.HasParentQueryParser; +import org.elasticsearch.index.query.IdsQueryParser; +import org.elasticsearch.index.query.IndicesQueryParser; +import org.elasticsearch.index.query.MatchAllQueryParser; +import org.elasticsearch.index.query.MatchNoneQueryParser; +import org.elasticsearch.index.query.MatchQueryParser; +import org.elasticsearch.index.query.MoreLikeThisQueryParser; +import org.elasticsearch.index.query.MultiMatchQueryParser; +import org.elasticsearch.index.query.NestedQueryParser; +import org.elasticsearch.index.query.PrefixQueryParser; +import org.elasticsearch.index.query.QueryParser; +import org.elasticsearch.index.query.QueryStringQueryParser; +import 
org.elasticsearch.index.query.RangeQueryParser; +import org.elasticsearch.index.query.RegexpQueryParser; +import org.elasticsearch.index.query.ScriptQueryParser; +import org.elasticsearch.index.query.SimpleQueryStringParser; +import org.elasticsearch.index.query.SpanContainingQueryParser; +import org.elasticsearch.index.query.SpanFirstQueryParser; +import org.elasticsearch.index.query.SpanMultiTermQueryParser; +import org.elasticsearch.index.query.SpanNearQueryParser; +import org.elasticsearch.index.query.SpanNotQueryParser; +import org.elasticsearch.index.query.SpanOrQueryParser; +import org.elasticsearch.index.query.SpanTermQueryParser; +import org.elasticsearch.index.query.SpanWithinQueryParser; +import org.elasticsearch.index.query.TemplateQueryParser; +import org.elasticsearch.index.query.TermQueryParser; +import org.elasticsearch.index.query.TermsQueryParser; +import org.elasticsearch.index.query.TypeQueryParser; +import org.elasticsearch.index.query.WildcardQueryParser; +import org.elasticsearch.index.query.WrapperQueryParser; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser; import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.indices.cache.query.IndicesQueryCache; @@ -120,10 +188,9 @@ public class IndicesModule extends AbstractModule { registerQueryParser(GeohashCellQuery.Parser.class); registerQueryParser(GeoPolygonQueryParser.class); registerQueryParser(ExistsQueryParser.class); - registerQueryParser(MissingQueryParser.class); registerQueryParser(MatchNoneQueryParser.class); - if (ShapesAvailability.JTS_AVAILABLE) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerQueryParser(GeoShapeQueryParser.class); } } @@ -147,7 +214,7 @@ public class IndicesModule extends AbstractModule { registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser()); registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new 
GeoPointFieldMapper.TypeParser()); - if (ShapesAvailability.JTS_AVAILABLE) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser()); } } @@ -219,6 +286,7 @@ public class IndicesModule extends AbstractModule { bind(IndicesFieldDataCacheListener.class).asEagerSingleton(); bind(TermVectorsService.class).asEagerSingleton(); bind(NodeServicesProvider.class).asEagerSingleton(); + bind(ShapeBuilderRegistry.class).asEagerSingleton(); } // public for testing diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index dead72aee8..d8c142f478 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -58,7 +59,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.plugins.PluginsService; import java.io.IOException; @@ -100,9 +100,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i @Inject public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv, - NodeSettingsService nodeSettingsService, AnalysisRegistry 
analysisRegistry, - IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, MapperRegistry mapperRegistry) { + ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry, + IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService, MapperRegistry mapperRegistry) { super(settings); this.pluginsService = pluginsService; this.nodeEnv = nodeEnv; @@ -113,7 +113,9 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; this.mapperRegistry = mapperRegistry; - nodeSettingsService.addListener(indexStoreConfig); + clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType); + clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle); + } @Override diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 33f3c127d6..0e1532bc6b 100644 --- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -25,9 +25,10 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import 
org.elasticsearch.node.settings.NodeSettingsService; import java.util.ArrayList; import java.util.List; @@ -45,25 +46,17 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final ConcurrentMap<String, CircuitBreaker> breakers = new ConcurrentHashMap(); - // Old pre-1.4.0 backwards compatible settings - public static final String OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING = "indices.fielddata.breaker.limit"; - public static final String OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.fielddata.breaker.overhead"; + public static final Setting<ByteSizeValue> TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER); - public static final String TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.total.limit"; - public static final String DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT = "70%"; + public static final Setting<ByteSizeValue> FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER); + public static final Setting<Double> FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER); + public static final Setting<CircuitBreaker.Type> FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); - public static final String FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.fielddata.limit"; - public static final String FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.fielddata.overhead"; - public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type"; - public static final String DEFAULT_FIELDDATA_BREAKER_LIMIT = "60%"; - public static final double DEFAULT_FIELDDATA_OVERHEAD_CONSTANT = 1.03; + public static final Setting<ByteSizeValue> 
REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER); + public static final Setting<Double> REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER); + public static final Setting<CircuitBreaker.Type> REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER); - public static final String REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.request.limit"; - public static final String REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.request.overhead"; - public static final String REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type"; - public static final String DEFAULT_REQUEST_BREAKER_LIMIT = "40%"; - public static final String DEFAULT_BREAKER_TYPE = "memory"; private volatile BreakerSettings parentSettings; private volatile BreakerSettings fielddataSettings; @@ -73,41 +66,21 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { private final AtomicLong parentTripCount = new AtomicLong(0); @Inject - public HierarchyCircuitBreakerService(Settings settings, NodeSettingsService nodeSettingsService) { + public HierarchyCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) { super(settings); - - // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING - // setting to keep backwards compatibility with 1.3, it can be safely - // removed when compatibility with 1.3 is no longer needed - String compatibilityFielddataLimitDefault = DEFAULT_FIELDDATA_BREAKER_LIMIT; - ByteSizeValue compatibilityFielddataLimit = settings.getAsMemory(OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING, null); - if (compatibilityFielddataLimit != null) { - compatibilityFielddataLimitDefault = compatibilityFielddataLimit.toString(); - } - - // This 
uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING - // setting to keep backwards compatibility with 1.3, it can be safely - // removed when compatibility with 1.3 is no longer needed - double compatibilityFielddataOverheadDefault = DEFAULT_FIELDDATA_OVERHEAD_CONSTANT; - Double compatibilityFielddataOverhead = settings.getAsDouble(OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (compatibilityFielddataOverhead != null) { - compatibilityFielddataOverheadDefault = compatibilityFielddataOverhead; - } - this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, - settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, compatibilityFielddataLimitDefault).bytes(), - settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, compatibilityFielddataOverheadDefault), - CircuitBreaker.Type.parseValue(settings.get(FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE)) + FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), + FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), + FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) ); this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST, - settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_REQUEST_BREAKER_LIMIT).bytes(), - settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0), - CircuitBreaker.Type.parseValue(settings.get(REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE)) + REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), + REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), + REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) ); - this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, - settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT).bytes(), 1.0, CircuitBreaker.Type.PARENT); + this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), 1.0, 
CircuitBreaker.Type.PARENT); if (logger.isTraceEnabled()) { logger.trace("parent circuit breaker with settings {}", this.parentSettings); } @@ -115,52 +88,38 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { registerBreaker(this.requestSettings); registerBreaker(this.fielddataSettings); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, this::setTotalCircuitBreakerLimit, this::validateTotalCircuitBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setFieldDataBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setRequestBreakerLimit); + } + private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) { + BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.bytes(), newRequestOverhead, + HierarchyCircuitBreakerService.this.requestSettings.getType()); + registerBreaker(newRequestSettings); + HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; + logger.info("Updated breaker settings request: {}", newRequestSettings); } - public class ApplySettings implements NodeSettingsService.Listener { - - @Override - public void onRefreshSettings(Settings settings) { - - // Fielddata settings - ByteSizeValue newFielddataMax = settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, null); - Double newFielddataOverhead = settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (newFielddataMax != null || newFielddataOverhead != null) { - long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes(); - newFielddataOverhead = newFielddataOverhead == null ? 
HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; + private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newFielddataOverhead) { + long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes(); + newFielddataOverhead = newFielddataOverhead == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; + BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, + HierarchyCircuitBreakerService.this.fielddataSettings.getType()); + registerBreaker(newFielddataSettings); + HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; + logger.info("Updated breaker settings field data: {}", newFielddataSettings); - BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, - HierarchyCircuitBreakerService.this.fielddataSettings.getType()); - registerBreaker(newFielddataSettings); - HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; - logger.info("Updated breaker settings fielddata: {}", newFielddataSettings); - } + } - // Request settings - ByteSizeValue newRequestMax = settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, null); - Double newRequestOverhead = settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (newRequestMax != null || newRequestOverhead != null) { - long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes(); - newRequestOverhead = newRequestOverhead == null ? 
HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead; - - BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead, - HierarchyCircuitBreakerService.this.requestSettings.getType()); - registerBreaker(newRequestSettings); - HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; - logger.info("Updated breaker settings request: {}", newRequestSettings); - } + private boolean validateTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT); + validateSettings(new BreakerSettings[]{newParentSettings}); + return true; + } - // Parent settings - long oldParentMax = HierarchyCircuitBreakerService.this.parentSettings.getLimit(); - ByteSizeValue newParentMax = settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, null); - if (newParentMax != null && (newParentMax.bytes() != oldParentMax)) { - BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, newParentMax.bytes(), 1.0, CircuitBreaker.Type.PARENT); - validateSettings(new BreakerSettings[]{newParentSettings}); - HierarchyCircuitBreakerService.this.parentSettings = newParentSettings; - logger.info("Updated breaker settings parent: {}", newParentSettings); - } - } + private void setTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT); + this.parentSettings = newParentSettings; } /** diff --git a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java index f7ae5f94b9..220ce9120e 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ 
b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java @@ -19,8 +19,12 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.index.shard.ShardId; +import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -30,15 +34,15 @@ import static java.util.Collections.unmodifiableMap; /** * Result for all copies of a shard */ -public class ShardsSyncedFlushResult { +public class ShardsSyncedFlushResult implements Streamable { private String failureReason; - private Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses; + private Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses; private String syncId; private ShardId shardId; // some shards may be unassigned, so we need this as state private int totalShards; - public ShardsSyncedFlushResult() { + private ShardsSyncedFlushResult() { } public ShardId getShardId() { @@ -59,7 +63,7 @@ public class ShardsSyncedFlushResult { /** * success constructor */ - public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses) { + public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses) { this.failureReason = null; this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses)); this.syncId = syncId; @@ -98,7 +102,7 @@ public class ShardsSyncedFlushResult { */ public int successfulShards() { int i = 0; - for (SyncedFlushService.SyncedFlushResponse result : shardResponses.values()) { + for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) { if (result.success()) { i++; } @@ -109,9 +113,9 @@ 
public class ShardsSyncedFlushResult { /** * @return an array of shard failures */ - public Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards() { - Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failures = new HashMap<>(); - for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> result : shardResponses.entrySet()) { + public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards() { + Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failures = new HashMap<>(); + for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> result : shardResponses.entrySet()) { if (result.getValue().success() == false) { failures.put(result.getKey(), result.getValue()); } @@ -123,11 +127,45 @@ public class ShardsSyncedFlushResult { * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. * Empty if synced flush failed before step three. */ - public Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardResponses() { + public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses() { return shardResponses; } public ShardId shardId() { return shardId; } + + @Override + public void readFrom(StreamInput in) throws IOException { + failureReason = in.readOptionalString(); + int numResponses = in.readInt(); + shardResponses = new HashMap<>(); + for (int i = 0; i < numResponses; i++) { + ShardRouting shardRouting = ShardRouting.readShardRoutingEntry(in); + SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); + shardResponses.put(shardRouting, response); + } + syncId = in.readOptionalString(); + shardId = ShardId.readShardId(in); + totalShards = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(failureReason); + out.writeInt(shardResponses.size()); + for 
(Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> entry : shardResponses.entrySet()) { + entry.getKey().writeTo(out); + entry.getValue().writeTo(out); + } + out.writeOptionalString(syncId); + shardId.writeTo(out); + out.writeInt(totalShards); + } + + public static ShardsSyncedFlushResult readShardsSyncedFlushResult(StreamInput in) throws IOException { + ShardsSyncedFlushResult shardsSyncedFlushResult = new ShardsSyncedFlushResult(); + shardsSyncedFlushResult.readFrom(in); + return shardsSyncedFlushResult; + } } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index ad264c2ac0..0918ad2afe 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -81,9 +82,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL this.clusterService = clusterService; this.transportService = transportService; this.indexNameExpressionResolver = indexNameExpressionResolver; - - transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); - transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, SyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, 
PreShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); + transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler()); } @@ -109,7 +109,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL * a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)} * for more details. */ - public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<IndicesSyncedFlushResult> listener) { + public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) { final ClusterState state = clusterService.state(); final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap(); @@ -123,7 +123,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } if (numberOfShards == 0) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); return; } final int finalTotalNumberOfShards = totalNumberOfShards; @@ -138,7 +138,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { results.get(index).add(syncedFlushResult); if (countDown.countDown()) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); } } @@ -147,7 +147,7 @@ public class SyncedFlushService extends 
AbstractComponent implements IndexEventL logger.debug("{} unexpected error while executing synced flush", shardId); results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage())); if (countDown.countDown()) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); } } }); @@ -297,33 +297,33 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state, Map<String, Engine.CommitId> expectedCommitIds, final ShardId shardId, final int totalShards, final ActionListener<ShardsSyncedFlushResult> listener) { final CountDown countDown = new CountDown(shards.size()); - final Map<ShardRouting, SyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap(); + final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap(); for (final ShardRouting shard : shards) { final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); if (node == null) { logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new SyncedFlushResponse("unknown node")); + results.put(shard, new ShardSyncedFlushResponse("unknown node")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; } final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); if (expectedCommitId == null) { logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. 
shard routing {}", shardId, syncId, shard); - results.put(shard, new SyncedFlushResponse("no commit id from pre-sync flush")); + results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; } logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId); - transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), - new BaseTransportResponseHandler<SyncedFlushResponse>() { + transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), + new BaseTransportResponseHandler<ShardSyncedFlushResponse>() { @Override - public SyncedFlushResponse newInstance() { - return new SyncedFlushResponse(); + public ShardSyncedFlushResponse newInstance() { + return new ShardSyncedFlushResponse(); } @Override - public void handleResponse(SyncedFlushResponse response) { - SyncedFlushResponse existing = results.put(shard, response); + public void handleResponse(ShardSyncedFlushResponse response) { + ShardSyncedFlushResponse existing = results.put(shard, response); assert existing == null : "got two answers for node [" + node + "]"; // count after the assert so we won't decrement twice in handleException contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); @@ -332,7 +332,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void handleException(TransportException exp) { logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard); - results.put(shard, new SyncedFlushResponse(exp.getMessage())); + results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } 
@@ -346,7 +346,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } private void contDownAndSendResponseIfDone(String syncId, List<ShardRouting> shards, ShardId shardId, int totalShards, - ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, SyncedFlushResponse> results) { + ActionListener<ShardsSyncedFlushResult> listener, CountDown countDown, Map<ShardRouting, ShardSyncedFlushResponse> results) { if (countDown.countDown()) { assert results.size() == shards.size(); listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); @@ -369,7 +369,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } continue; } - transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() { + transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler<PreSyncedFlushResponse>() { @Override public PreSyncedFlushResponse newInstance() { return new PreSyncedFlushResponse(); @@ -401,7 +401,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) { + private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); logger.trace("{} performing pre sync flush", request.shardId()); @@ -410,7 +410,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL return new PreSyncedFlushResponse(commitId); } - private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) { + private 
ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().id()); logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId()); @@ -418,11 +418,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); switch (result) { case SUCCESS: - return new SyncedFlushResponse(); + return new ShardSyncedFlushResponse(); case COMMIT_MISMATCH: - return new SyncedFlushResponse("commit has changed"); + return new ShardSyncedFlushResponse("commit has changed"); case PENDING_OPERATIONS: - return new SyncedFlushResponse("pending operations"); + return new ShardSyncedFlushResponse("pending operations"); default: throw new ElasticsearchException("unknown synced flush result [" + result + "]"); } @@ -439,19 +439,19 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL return new InFlightOpsResponse(opCount); } - public final static class PreSyncedFlushRequest extends TransportRequest { + public final static class PreShardSyncedFlushRequest extends TransportRequest { private ShardId shardId; - public PreSyncedFlushRequest() { + public PreShardSyncedFlushRequest() { } - public PreSyncedFlushRequest(ShardId shardId) { + public PreShardSyncedFlushRequest(ShardId shardId) { this.shardId = shardId; } @Override public String toString() { - return "PreSyncedFlushRequest{" + + return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; } @@ -504,16 +504,16 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - public static final class SyncedFlushRequest extends TransportRequest { + public static final class ShardSyncedFlushRequest 
extends TransportRequest { private String syncId; private Engine.CommitId expectedCommitId; private ShardId shardId; - public SyncedFlushRequest() { + public ShardSyncedFlushRequest() { } - public SyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { + public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { this.expectedCommitId = expectedCommitId; this.shardId = shardId; this.syncId = syncId; @@ -549,7 +549,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public String toString() { - return "SyncedFlushRequest{" + + return "ShardSyncedFlushRequest{" + "shardId=" + shardId + ",syncId='" + syncId + '\'' + '}'; @@ -559,18 +559,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL /** * Response for third step of synced flush (writing the sync id) for one shard copy */ - public static final class SyncedFlushResponse extends TransportResponse { + public static final class ShardSyncedFlushResponse extends TransportResponse { /** * a non null value indicates a failure to sync flush. 
null means success */ String failureReason; - public SyncedFlushResponse() { + public ShardSyncedFlushResponse() { failureReason = null; } - public SyncedFlushResponse(String failureReason) { + public ShardSyncedFlushResponse(String failureReason) { this.failureReason = failureReason; } @@ -596,11 +596,17 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public String toString() { - return "SyncedFlushResponse{" + + return "ShardSyncedFlushResponse{" + "success=" + success() + ", failureReason='" + failureReason + '\'' + '}'; } + + public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException { + ShardSyncedFlushResponse shardSyncedFlushResponse = new ShardSyncedFlushResponse(); + shardSyncedFlushResponse.readFrom(in); + return shardSyncedFlushResponse; + } } @@ -677,18 +683,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreSyncedFlushRequest> { + private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> { @Override - public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performPreSyncedFlush(request)); } } - private final class SyncedFlushTransportHandler implements TransportRequestHandler<SyncedFlushRequest> { + private final class SyncedFlushTransportHandler implements TransportRequestHandler<ShardSyncedFlushRequest> { @Override - public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performSyncedFlush(request)); } } diff --git 
a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java index 0cec415d63..08b7b34e91 100644 --- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.query; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.EmptyQueryBuilder; @@ -40,11 +41,12 @@ public class IndicesQueriesRegistry extends AbstractComponent { public IndicesQueriesRegistry(Settings settings, Set<QueryParser> injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) { super(settings); Map<String, QueryParser<?>> queryParsers = new HashMap<>(); - for (QueryParser<?> queryParser : injectedQueryParsers) { + for (@SuppressWarnings("unchecked") QueryParser<? extends QueryBuilder> queryParser : injectedQueryParsers) { for (String name : queryParser.names()) { queryParsers.put(name, queryParser); } - namedWriteableRegistry.registerPrototype(QueryBuilder.class, queryParser.getBuilderPrototype()); + @SuppressWarnings("unchecked") NamedWriteable<? extends QueryBuilder> qb = queryParser.getBuilderPrototype(); + namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb); } // EmptyQueryBuilder is not registered as query parser but used internally. // We need to register it with the NamedWriteableRegistry in order to serialize it @@ -58,4 +60,4 @@ public class IndicesQueriesRegistry extends AbstractComponent { public Map<String, QueryParser<?>> queryParsers() { return queryParsers; } -}
\ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 6db38d59e8..682b66e084 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -23,16 +23,16 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; -import java.util.Objects; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -40,34 +40,33 @@ import java.util.concurrent.TimeUnit; */ public class RecoverySettings extends AbstractComponent implements Closeable { - public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams"; - public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams"; - public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec"; + public static final Setting<Integer> INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.CLUSTER); + public static final Setting<Integer> 
INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.CLUSTER); + public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER); /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. */ - public static final String INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC = "indices.recovery.retry_delay_state_sync"; + public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER); /** how long to wait before retrying after network related issues */ - public static final String INDICES_RECOVERY_RETRY_DELAY_NETWORK = "indices.recovery.retry_delay_network"; - - /** - * recoveries that don't show any activity for more then this interval will be failed. 
- * defaults to `indices.recovery.internal_action_long_timeout` - */ - public static final String INDICES_RECOVERY_ACTIVITY_TIMEOUT = "indices.recovery.recovery_activity_timeout"; + public static final Setting<TimeValue> INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER); /** timeout value to use for requests made as part of the recovery process */ - public static final String INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT = "indices.recovery.internal_action_timeout"; + public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER); /** * timeout value to use for requests made as part of the recovery process that are expected to take long time. * defaults to twice `indices.recovery.internal_action_timeout`. */ - public static final String INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT = "indices.recovery.internal_action_long_timeout"; + public static final Setting<TimeValue> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + /** + * recoveries that don't show any activity for more then this interval will be failed. 
+ * defaults to `indices.recovery.internal_action_long_timeout` + */ + public static final Setting<TimeValue> INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s) , TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes(); @@ -89,31 +88,28 @@ public class RecoverySettings extends AbstractComponent implements Closeable { private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; @Inject - public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { + public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500)); + this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the master time to remove a faulty node - this.retryDelayNetwork = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_NETWORK, TimeValue.timeValueSeconds(5)); + this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); - this.internalActionTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, TimeValue.timeValueMinutes(15)); - this.internalActionLongTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, new TimeValue(internalActionTimeout.millis() * 2)); + this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings); + this.internalActionLongTimeout = INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings); - this.activityTimeout = settings.getAsTime(INDICES_RECOVERY_ACTIVITY_TIMEOUT, - // 
default to the internalActionLongTimeout used as timeouts on RecoverySource - internalActionLongTimeout - ); + this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings); - this.concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, 3); + this.concurrentStreams = INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.get(settings); this.concurrentStreamPool = EsExecutors.newScaling("recovery_stream", 0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]")); - this.concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 2); + this.concurrentSmallFileStreams = INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.get(settings); this.concurrentSmallFileStreamPool = EsExecutors.newScaling("small_file_recovery_stream", 0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]")); - this.maxBytesPerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(40, ByteSizeUnit.MB)); + this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings); if (maxBytesPerSec.bytes() <= 0) { rateLimiter = null; } else { @@ -123,7 +119,14 @@ public class RecoverySettings extends AbstractComponent implements Closeable { logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]", maxBytesPerSec, concurrentStreams); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, this::setConcurrentStreams); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, this::setConcurrentSmallFileStreams); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); 
+ clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); } @Override @@ -173,51 +176,44 @@ public class RecoverySettings extends AbstractComponent implements Closeable { this.chunkSize = chunkSize; } + private void setConcurrentStreams(int concurrentStreams) { + this.concurrentStreams = concurrentStreams; + concurrentStreamPool.setMaximumPoolSize(concurrentStreams); + } + + public void setRetryDelayStateSync(TimeValue retryDelayStateSync) { + this.retryDelayStateSync = retryDelayStateSync; + } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec); - if (!Objects.equals(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) { - logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec); - RecoverySettings.this.maxBytesPerSec = maxSizePerSec; - if (maxSizePerSec.bytes() <= 0) { - rateLimiter = null; - } else if (rateLimiter != null) { - rateLimiter.setMBPerSec(maxSizePerSec.mbFrac()); - } else { - rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac()); - } - } - - int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams); - if (concurrentStreams != RecoverySettings.this.concurrentStreams) { - logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", 
RecoverySettings.this.concurrentStreams, concurrentStreams); - RecoverySettings.this.concurrentStreams = concurrentStreams; - RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams); - } - - int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams); - if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) { - logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams); - RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams; - RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams); - } - - RecoverySettings.this.retryDelayNetwork = maybeUpdate(RecoverySettings.this.retryDelayNetwork, settings, INDICES_RECOVERY_RETRY_DELAY_NETWORK); - RecoverySettings.this.retryDelayStateSync = maybeUpdate(RecoverySettings.this.retryDelayStateSync, settings, INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC); - RecoverySettings.this.activityTimeout = maybeUpdate(RecoverySettings.this.activityTimeout, settings, INDICES_RECOVERY_ACTIVITY_TIMEOUT); - RecoverySettings.this.internalActionTimeout = maybeUpdate(RecoverySettings.this.internalActionTimeout, settings, INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT); - RecoverySettings.this.internalActionLongTimeout = maybeUpdate(RecoverySettings.this.internalActionLongTimeout, settings, INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT); - } + public void setRetryDelayNetwork(TimeValue retryDelayNetwork) { + this.retryDelayNetwork = retryDelayNetwork; + } + + public void setActivityTimeout(TimeValue activityTimeout) { + this.activityTimeout = activityTimeout; + } + + public void setInternalActionTimeout(TimeValue internalActionTimeout) { + this.internalActionTimeout = internalActionTimeout; + } + + public void 
setInternalActionLongTimeout(TimeValue internalActionLongTimeout) { + this.internalActionLongTimeout = internalActionLongTimeout; + } - private TimeValue maybeUpdate(final TimeValue currentValue, final Settings settings, final String key) { - final TimeValue value = settings.getAsTime(key, currentValue); - if (value.equals(currentValue)) { - return currentValue; - } - logger.info("updating [] from [{}] to [{}]", key, currentValue, value); - return value; + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { + this.maxBytesPerSec = maxBytesPerSec; + if (maxBytesPerSec.bytes() <= 0) { + rateLimiter = null; + } else if (rateLimiter != null) { + rateLimiter.setMBPerSec(maxBytesPerSec.mbFrac()); + } else { + rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac()); } } + + private void setConcurrentSmallFileStreams(int concurrentSmallFileStreams) { + this.concurrentSmallFileStreams = concurrentSmallFileStreams; + concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams); + } } diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index f095cc355e..0eed82561a 100644 --- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -49,7 +51,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShardState; 
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.settings.NodeSettingsService; import java.io.IOException; import java.util.ArrayList; @@ -66,7 +67,7 @@ import java.util.concurrent.locks.ReentrantLock; */ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLService> { - public static final String INDICES_TTL_INTERVAL = "indices.ttl.interval"; + public static final Setting<TimeValue> INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge"; private final ClusterService clusterService; @@ -77,16 +78,15 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ private PurgerThread purgerThread; @Inject - public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService, NodeSettingsService nodeSettingsService, TransportBulkAction bulkAction) { + public IndicesTTLService(Settings settings, ClusterService clusterService, IndicesService indicesService, ClusterSettings clusterSettings, TransportBulkAction bulkAction) { super(settings); this.clusterService = clusterService; this.indicesService = indicesService; - TimeValue interval = this.settings.getAsTime("indices.ttl.interval", TimeValue.timeValueSeconds(60)); + TimeValue interval = INDICES_TTL_INTERVAL_SETTING.get(settings); this.bulkAction = bulkAction; this.bulkSize = this.settings.getAsInt("indices.ttl.bulk_size", 10000); this.purgerThread = new PurgerThread(EsExecutors.threadName(settings, "[ttl_expire]"), interval); - - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(INDICES_TTL_INTERVAL_SETTING, this.purgerThread::resetInterval); } @Override @@ -310,20 +310,6 @@ public class IndicesTTLService extends 
AbstractLifecycleComponent<IndicesTTLServ return bulkRequest; } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final TimeValue currentInterval = IndicesTTLService.this.purgerThread.getInterval(); - final TimeValue interval = settings.getAsTime(INDICES_TTL_INTERVAL, currentInterval); - if (!interval.equals(currentInterval)) { - logger.info("updating indices.ttl.interval from [{}] to [{}]",currentInterval, interval); - IndicesTTLService.this.purgerThread.resetInterval(interval); - - } - } - } - - private static final class Notifier { private final ReentrantLock lock = new ReentrantLock(); diff --git a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index e224c722d4..82a99bd0bd 100644 --- a/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -29,7 +29,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; -import java.lang.management.*; +import java.lang.management.GarbageCollectorMXBean; +import java.lang.management.ManagementFactory; +import java.lang.management.ManagementPermission; +import java.lang.management.MemoryMXBean; +import java.lang.management.MemoryPoolMXBean; +import java.lang.management.PlatformManagedObject; +import java.lang.management.RuntimeMXBean; +import java.lang.reflect.Method; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -101,6 +108,20 @@ public class JvmInfo implements Streamable, ToXContent { info.memoryPools[i] = memoryPoolMXBean.getName(); } + try { + @SuppressWarnings("unchecked") Class<? extends PlatformManagedObject> clazz = + (Class<? 
extends PlatformManagedObject>)Class.forName("com.sun.management.HotSpotDiagnosticMXBean"); + Class<?> vmOptionClazz = Class.forName("com.sun.management.VMOption"); + PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz); + Method vmOptionMethod = clazz.getMethod("getVMOption", String.class); + Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); + Method valueMethod = vmOptionClazz.getMethod("getValue"); + info.useCompressedOops = (String)valueMethod.invoke(useCompressedOopsVmOption); + } catch (Throwable t) { + // unable to deduce the state of compressed oops + info.useCompressedOops = "unknown"; + } + INSTANCE = info; } @@ -135,6 +156,8 @@ public class JvmInfo implements Streamable, ToXContent { String[] gcCollectors = Strings.EMPTY_ARRAY; String[] memoryPools = Strings.EMPTY_ARRAY; + private String useCompressedOops; + private JvmInfo() { } @@ -258,6 +281,18 @@ public class JvmInfo implements Streamable, ToXContent { return this.systemProperties; } + /** + * The value of the JVM flag UseCompressedOops, if available otherwise + * "unknown". The value "unknown" indicates that an attempt was + * made to obtain the value of the flag on this JVM and the attempt + * failed. 
+ * + * @return the value of the JVM flag UseCompressedOops or "unknown" + */ + public String useCompressedOops() { + return this.useCompressedOops; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.JVM); @@ -279,6 +314,8 @@ public class JvmInfo implements Streamable, ToXContent { builder.field(Fields.GC_COLLECTORS, gcCollectors); builder.field(Fields.MEMORY_POOLS, memoryPools); + builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops); + builder.endObject(); return builder; } @@ -306,6 +343,7 @@ public class JvmInfo implements Streamable, ToXContent { static final XContentBuilderString DIRECT_MAX_IN_BYTES = new XContentBuilderString("direct_max_in_bytes"); static final XContentBuilderString GC_COLLECTORS = new XContentBuilderString("gc_collectors"); static final XContentBuilderString MEMORY_POOLS = new XContentBuilderString("memory_pools"); + static final XContentBuilderString USING_COMPRESSED_OOPS = new XContentBuilderString("using_compressed_ordinary_object_pointers"); } public static JvmInfo readJvmInfo(StreamInput in) throws IOException { @@ -337,6 +375,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.readFrom(in); gcCollectors = in.readStringArray(); memoryPools = in.readStringArray(); + useCompressedOops = in.readString(); } @Override @@ -361,6 +400,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.writeTo(out); out.writeStringArray(gcCollectors); out.writeStringArray(memoryPools); + out.writeString(useCompressedOops); } public static class Mem implements Streamable { diff --git a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java index 895b3d844f..599755e78a 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java @@ -17,21 +17,18 
@@ * under the License. */ -package org.elasticsearch.client.transport; +package org.elasticsearch.monitor.os; -import org.elasticsearch.client.support.Headers; -import org.elasticsearch.client.transport.support.TransportProxyClient; -import org.elasticsearch.common.inject.AbstractModule; +public class DummyOsInfo extends OsInfo { -/** - * - */ -public class ClientTransportModule extends AbstractModule { - - @Override - protected void configure() { - bind(Headers.class).asEagerSingleton(); - bind(TransportProxyClient.class).asEagerSingleton(); - bind(TransportClientNodesService.class).asEagerSingleton(); + DummyOsInfo() { + refreshInterval = 0; + availableProcessors = 0; + allocatedProcessors = 0; + name = "dummy_name"; + arch = "dummy_arch"; + version = "dummy_version"; } + + public static final DummyOsInfo INSTANCE = new DummyOsInfo(); } diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index f34cd51a14..d94447221c 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -108,6 +108,9 @@ public class OsInfo implements Streamable, ToXContent { refreshInterval = in.readLong(); availableProcessors = in.readInt(); allocatedProcessors = in.readInt(); + name = in.readOptionalString(); + arch = in.readOptionalString(); + version = in.readOptionalString(); } @Override @@ -115,5 +118,8 @@ public class OsInfo implements Streamable, ToXContent { out.writeLong(refreshInterval); out.writeInt(availableProcessors); out.writeInt(allocatedProcessors); + out.writeOptionalString(name); + out.writeOptionalString(arch); + out.writeOptionalString(version); } } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index f4bc34a91e..c964e79587 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ 
b/core/src/main/java/org/elasticsearch/node/Node.java @@ -23,7 +23,6 @@ import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; -import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClientModule; @@ -42,11 +41,15 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryService; @@ -58,7 +61,7 @@ import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpServer; -import org.elasticsearch.http.HttpServerModule; +import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -73,7 +76,6 @@ import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.jvm.JvmInfo; import 
org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.percolator.PercolatorModule; import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.plugins.Plugin; @@ -81,7 +83,6 @@ import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestModule; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; @@ -90,14 +91,21 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportService; import org.elasticsearch.tribe.TribeModule; import org.elasticsearch.tribe.TribeService; import org.elasticsearch.watcher.ResourceWatcherModule; import org.elasticsearch.watcher.ResourceWatcherService; +import java.io.BufferedWriter; import java.io.IOException; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -156,7 +164,6 @@ public class Node implements Releasable { throw new IllegalStateException("Failed to created node environment", ex); } final NetworkService networkService = new NetworkService(settings); - final NodeSettingsService nodeSettingsService = new NodeSettingsService(settings); final SettingsFilter settingsFilter = new SettingsFilter(settings); final ThreadPool threadPool = new 
ThreadPool(settings); boolean success = false; @@ -171,20 +178,15 @@ public class Node implements Releasable { } modules.add(new PluginsModule(pluginsService)); modules.add(new SettingsModule(this.settings, settingsFilter)); - modules.add(new NodeModule(this, nodeSettingsService, monitorService)); - modules.add(new NetworkModule(networkService)); - modules.add(new ScriptModule(this.settings)); modules.add(new EnvironmentModule(environment)); + modules.add(new NodeModule(this, monitorService)); + modules.add(new NetworkModule(networkService, settings, false)); + modules.add(new ScriptModule(this.settings)); modules.add(new NodeEnvironmentModule(nodeEnvironment)); modules.add(new ClusterNameModule(this.settings)); modules.add(new ThreadPoolModule(threadPool)); modules.add(new DiscoveryModule(this.settings)); modules.add(new ClusterModule(this.settings)); - modules.add(new RestModule(this.settings)); - modules.add(new TransportModule(settings)); - if (settings.getAsBoolean(HTTP_ENABLED, true)) { - modules.add(new HttpServerModule(settings)); - } modules.add(new IndicesModule()); modules.add(new SearchModule()); modules.add(new ActionModule(false)); @@ -201,7 +203,7 @@ public class Node implements Releasable { injector = modules.createInjector(); client = injector.getInstance(Client.class); - threadPool.setNodeSettingsService(injector.getInstance(NodeSettingsService.class)); + threadPool.setClusterSettings(injector.getInstance(ClusterSettings.class)); success = true; } catch (IOException ex) { throw new ElasticsearchException("failed to bind service", ex); @@ -274,6 +276,15 @@ public class Node implements Releasable { injector.getInstance(ResourceWatcherService.class).start(); injector.getInstance(TribeService.class).start(); + if (System.getProperty("es.tests.portsfile", "false").equals("true")) { + if (settings.getAsBoolean("http.enabled", true)) { + HttpServerTransport http = injector.getInstance(HttpServerTransport.class); + writePortsFile("http", 
http.boundAddress()); + } + TransportService transport = injector.getInstance(TransportService.class); + writePortsFile("transport", transport.boundAddress()); + } + logger.info("started"); return this; @@ -425,4 +436,27 @@ public class Node implements Releasable { public Injector injector() { return this.injector; } + + /** Writes a file to the logs dir containing the ports for the given transport type */ + private void writePortsFile(String type, BoundTransportAddress boundAddress) { + Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); + try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { + for (TransportAddress address : boundAddress.boundAddresses()) { + InetAddress inetAddress = InetAddress.getByName(address.getAddress()); + if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) { + // no link local, just causes problems + continue; + } + writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n"); + } + } catch (IOException e) { + throw new RuntimeException("Failed to write ports file", e); + } + Path portsFile = environment.logsFile().resolve(type + ".ports"); + try { + Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE); + } catch (IOException e) { + throw new RuntimeException("Failed to rename ports file", e); + } + } } diff --git a/core/src/main/java/org/elasticsearch/node/NodeModule.java b/core/src/main/java/org/elasticsearch/node/NodeModule.java index 3641c32503..aa52d38934 100644 --- a/core/src/main/java/org/elasticsearch/node/NodeModule.java +++ b/core/src/main/java/org/elasticsearch/node/NodeModule.java @@ -23,9 +23,7 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.node.Node; import 
org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; /** * @@ -33,16 +31,14 @@ import org.elasticsearch.node.settings.NodeSettingsService; public class NodeModule extends AbstractModule { private final Node node; - private final NodeSettingsService nodeSettingsService; private final MonitorService monitorService; // pkg private so tests can mock Class<? extends PageCacheRecycler> pageCacheRecyclerImpl = PageCacheRecycler.class; Class<? extends BigArrays> bigArraysImpl = BigArrays.class; - public NodeModule(Node node, NodeSettingsService nodeSettingsService, MonitorService monitorService) { + public NodeModule(Node node, MonitorService monitorService) { this.node = node; - this.nodeSettingsService = nodeSettingsService; this.monitorService = monitorService; } @@ -60,7 +56,6 @@ public class NodeModule extends AbstractModule { } bind(Node.class).toInstance(node); - bind(NodeSettingsService.class).toInstance(nodeSettingsService); bind(MonitorService.class).toInstance(monitorService); bind(NodeService.class).asEagerSingleton(); } diff --git a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java b/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java deleted file mode 100644 index dbe6a33172..0000000000 --- a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.node.settings; - -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; - -/** - * A service that allows to register for node settings change that can come from cluster - * events holding new settings. - */ -public class NodeSettingsService extends AbstractComponent implements ClusterStateListener { - - private static volatile Settings globalSettings = Settings.Builder.EMPTY_SETTINGS; - - /** - * Returns the global (static) settings last updated by a node. Note, if you have multiple - * nodes on the same JVM, it will just return the latest one set... 
- */ - public static Settings getGlobalSettings() { - return globalSettings; - } - - private volatile Settings lastSettingsApplied; - - private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<>(); - - @Inject - public NodeSettingsService(Settings settings) { - super(settings); - globalSettings = settings; - } - - // inject it as a member, so we won't get into possible cyclic problems - public void setClusterService(ClusterService clusterService) { - clusterService.add(this); - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency - if (event.state().blocks().disableStatePersistence()) { - return; - } - - if (!event.metaDataChanged()) { - // nothing changed in the metadata, no need to check - return; - } - - if (lastSettingsApplied != null && event.state().metaData().settings().equals(lastSettingsApplied)) { - // nothing changed in the settings, ignore - return; - } - - for (Listener listener : listeners) { - try { - listener.onRefreshSettings(event.state().metaData().settings()); - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, listener); - } - } - - try { - for (Map.Entry<String, String> entry : event.state().metaData().settings().getAsMap().entrySet()) { - if (entry.getKey().startsWith("logger.")) { - String component = entry.getKey().substring("logger.".length()); - if ("_root".equals(component)) { - ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); - } else { - ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); - } - } - } - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, "logger"); - } - - lastSettingsApplied = event.state().metaData().settings(); - globalSettings = lastSettingsApplied; - } - - /** - * Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. 
- */ - public void addListener(Listener listener) { - this.listeners.add(listener); - } - - public void removeListener(Listener listener) { - this.listeners.remove(listener); - } - - public interface Listener { - void onRefreshSettings(Settings settings); - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 70abaaaff3..8df956f2ce 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -38,12 +38,11 @@ import org.elasticsearch.common.HasHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; @@ -74,6 +73,8 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -139,7 +140,7 @@ public class PercolateContext extends SearchContext { this.bigArrays = 
bigArrays.withCircuitBreaking(); this.querySearchResult = new QuerySearchResult(0, searchShardTarget); this.engineSearcher = indexShard.acquireSearcher("percolate"); - this.searcher = new ContextIndexSearcher(this, engineSearcher); + this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.scriptService = scriptService; this.numberOfShards = request.getNumberOfShards(); this.aliasFilter = aliasFilter; @@ -164,7 +165,7 @@ public class PercolateContext extends SearchContext { fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); } hitContext().reset( - new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields), + new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields), atomicReaderContext, 0, docSearcher.searcher() ); } @@ -748,5 +749,7 @@ public class PercolateContext extends SearchContext { } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Profilers getProfilers() { + throw new UnsupportedOperationException(); + } } diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index fa7b47766a..eb33f3832b 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -52,8 +52,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.BytesText; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -533,10 +531,10 @@ public class 
PercolatorService extends AbstractComponent { List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize); outer: for (PercolateShardResponse response : shardResults) { - Text index = new StringText(response.getIndex()); + Text index = new Text(response.getIndex()); for (int i = 0; i < response.matches().length; i++) { float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i]; - Text match = new BytesText(new BytesArray(response.matches()[i])); + Text match = new Text(new BytesArray(response.matches()[i])); Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i); finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); if (requestedSize != 0 && finalMatches.size() == requestedSize) { @@ -686,10 +684,10 @@ public class PercolatorService extends AbstractComponent { List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize); if (nonEmptyResponses == 1) { PercolateShardResponse response = shardResults.get(firstNonEmptyIndex); - Text index = new StringText(response.getIndex()); + Text index = new Text(response.getIndex()); for (int i = 0; i < response.matches().length; i++) { float score = response.scores().length == 0 ? 
Float.NaN : response.scores()[i]; - Text match = new BytesText(new BytesArray(response.matches()[i])); + Text match = new Text(new BytesArray(response.matches()[i])); if (!response.hls().isEmpty()) { Map<String, HighlightField> hl = response.hls().get(i); finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); @@ -728,8 +726,8 @@ public class PercolatorService extends AbstractComponent { slots[requestIndex]++; PercolateShardResponse shardResponse = shardResults.get(requestIndex); - Text index = new StringText(shardResponse.getIndex()); - Text match = new BytesText(new BytesArray(shardResponse.matches()[itemIndex])); + Text index = new Text(shardResponse.getIndex()); + Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex])); float score = shardResponse.scores()[itemIndex]; if (!shardResponse.hls().isEmpty()) { Map<String, HighlightField> hl = shardResponse.hls().get(itemIndex); diff --git a/core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java index b537c448be..a57a96c631 100644 --- a/core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java +++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java @@ -16,23 +16,13 @@ * specific language governing permissions and limitations * under the License. 
*/ +package org.elasticsearch.plugins; -package org.elasticsearch.cluster.settings; +public class DummyPluginInfo extends PluginInfo { -import org.elasticsearch.common.inject.BindingAnnotation; + private DummyPluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) { + super(name, description, site, version, jvm, classname, isolated); + } -import java.lang.annotation.Documented; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.FIELD; -import static java.lang.annotation.ElementType.PARAMETER; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - - -@BindingAnnotation -@Target({FIELD, PARAMETER}) -@Retention(RUNTIME) -@Documented -public @interface ClusterDynamicSettings { -}
\ No newline at end of file + public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", true, "dummy_plugin_version", true, "DummyPluginName", true); +} diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java index 6600bf7035..1ebe7813d3 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -22,6 +22,7 @@ package org.elasticsearch.plugins; import org.apache.lucene.util.IOUtils; import org.elasticsearch.*; import org.elasticsearch.bootstrap.JarHell; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.collect.Tuple; @@ -66,7 +67,7 @@ public class PluginManager { "plugin", "plugin.bat", "service.bat")); - + static final Set<String> MODULES = unmodifiableSet(newHashSet( "lang-expression", "lang-groovy")); @@ -89,6 +90,7 @@ public class PluginManager { "mapper-murmur3", "mapper-size", "repository-azure", + "repository-hdfs", "repository-s3", "store-smb")); @@ -124,7 +126,7 @@ public class PluginManager { checkForForbiddenName(pluginHandle.name); } else { // if we have no name but url, use temporary name that will be overwritten later - pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null); + pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null); } Path pluginFile = download(pluginHandle, terminal); @@ -224,7 +226,7 @@ public class PluginManager { PluginInfo info = PluginInfo.readFromProperties(root); terminal.println(VERBOSE, "%s", info); - // don't let luser install plugin as a module... + // don't let luser install plugin as a module... 
// they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java deleted file mode 100644 index f0e4d10d7c..0000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; -import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; -import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; -import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; -import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; -import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; -import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; -import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; -import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; -import 
org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; -import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; -import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; -import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; -import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; -import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; -import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; -import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; -import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; -import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; -import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; -import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; -import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; -import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; -import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; -import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; -import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; -import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; -import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; -import 
org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; -import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; -import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; -import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; -import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; -import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; -import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; -import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; -import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; -import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; -import org.elasticsearch.rest.action.bulk.RestBulkAction; -import org.elasticsearch.rest.action.cat.AbstractCatAction; -import org.elasticsearch.rest.action.cat.RestAliasAction; -import org.elasticsearch.rest.action.cat.RestAllocationAction; -import org.elasticsearch.rest.action.cat.RestCatAction; -import org.elasticsearch.rest.action.cat.RestFielddataAction; -import org.elasticsearch.rest.action.cat.RestHealthAction; -import org.elasticsearch.rest.action.cat.RestIndicesAction; -import org.elasticsearch.rest.action.cat.RestMasterAction; -import 
org.elasticsearch.rest.action.cat.RestNodeAttrsAction; -import org.elasticsearch.rest.action.cat.RestNodesAction; -import org.elasticsearch.rest.action.cat.RestPluginsAction; -import org.elasticsearch.rest.action.cat.RestRepositoriesAction; -import org.elasticsearch.rest.action.cat.RestSegmentsAction; -import org.elasticsearch.rest.action.cat.RestShardsAction; -import org.elasticsearch.rest.action.cat.RestSnapshotAction; -import org.elasticsearch.rest.action.cat.RestThreadPoolAction; -import org.elasticsearch.rest.action.delete.RestDeleteAction; -import org.elasticsearch.rest.action.explain.RestExplainAction; -import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; -import org.elasticsearch.rest.action.get.RestGetAction; -import org.elasticsearch.rest.action.get.RestGetSourceAction; -import org.elasticsearch.rest.action.get.RestHeadAction; -import org.elasticsearch.rest.action.get.RestMultiGetAction; -import org.elasticsearch.rest.action.index.RestIndexAction; -import org.elasticsearch.rest.action.main.RestMainAction; -import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; -import org.elasticsearch.rest.action.percolate.RestPercolateAction; -import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; -import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; -import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; -import org.elasticsearch.rest.action.search.RestClearScrollAction; -import org.elasticsearch.rest.action.search.RestMultiSearchAction; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.rest.action.search.RestSearchScrollAction; -import org.elasticsearch.rest.action.suggest.RestSuggestAction; -import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction; -import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction; -import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; -import 
org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; -import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; -import org.elasticsearch.rest.action.update.RestUpdateAction; - -import java.util.ArrayList; -import java.util.List; - -/** - * - */ -public class RestActionModule extends AbstractModule { - private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>(); - - public RestActionModule(List<Class<? extends BaseRestHandler>> restPluginsActions) { - this.restPluginsActions = restPluginsActions; - } - - @Override - protected void configure() { - for (Class<? extends BaseRestHandler> restAction : restPluginsActions) { - bind(restAction).asEagerSingleton(); - } - - bind(RestMainAction.class).asEagerSingleton(); - - bind(RestNodesInfoAction.class).asEagerSingleton(); - bind(RestNodesStatsAction.class).asEagerSingleton(); - bind(RestNodesHotThreadsAction.class).asEagerSingleton(); - bind(RestClusterStatsAction.class).asEagerSingleton(); - bind(RestClusterStateAction.class).asEagerSingleton(); - bind(RestClusterHealthAction.class).asEagerSingleton(); - bind(RestClusterUpdateSettingsAction.class).asEagerSingleton(); - bind(RestClusterGetSettingsAction.class).asEagerSingleton(); - bind(RestClusterRerouteAction.class).asEagerSingleton(); - bind(RestClusterSearchShardsAction.class).asEagerSingleton(); - bind(RestPendingClusterTasksAction.class).asEagerSingleton(); - bind(RestPutRepositoryAction.class).asEagerSingleton(); - bind(RestGetRepositoriesAction.class).asEagerSingleton(); - bind(RestDeleteRepositoryAction.class).asEagerSingleton(); - bind(RestVerifyRepositoryAction.class).asEagerSingleton(); - bind(RestGetSnapshotsAction.class).asEagerSingleton(); - bind(RestCreateSnapshotAction.class).asEagerSingleton(); - bind(RestRestoreSnapshotAction.class).asEagerSingleton(); - bind(RestDeleteSnapshotAction.class).asEagerSingleton(); - bind(RestSnapshotsStatusAction.class).asEagerSingleton(); - - 
bind(RestIndicesExistsAction.class).asEagerSingleton(); - bind(RestTypesExistsAction.class).asEagerSingleton(); - bind(RestGetIndicesAction.class).asEagerSingleton(); - bind(RestIndicesStatsAction.class).asEagerSingleton(); - bind(RestIndicesSegmentsAction.class).asEagerSingleton(); - bind(RestIndicesShardStoresAction.class).asEagerSingleton(); - bind(RestGetAliasesAction.class).asEagerSingleton(); - bind(RestAliasesExistAction.class).asEagerSingleton(); - bind(RestIndexDeleteAliasesAction.class).asEagerSingleton(); - bind(RestIndexPutAliasAction.class).asEagerSingleton(); - bind(RestIndicesAliasesAction.class).asEagerSingleton(); - bind(RestGetIndicesAliasesAction.class).asEagerSingleton(); - bind(RestCreateIndexAction.class).asEagerSingleton(); - bind(RestDeleteIndexAction.class).asEagerSingleton(); - bind(RestCloseIndexAction.class).asEagerSingleton(); - bind(RestOpenIndexAction.class).asEagerSingleton(); - - bind(RestUpdateSettingsAction.class).asEagerSingleton(); - bind(RestGetSettingsAction.class).asEagerSingleton(); - - bind(RestAnalyzeAction.class).asEagerSingleton(); - bind(RestGetIndexTemplateAction.class).asEagerSingleton(); - bind(RestPutIndexTemplateAction.class).asEagerSingleton(); - bind(RestDeleteIndexTemplateAction.class).asEagerSingleton(); - bind(RestHeadIndexTemplateAction.class).asEagerSingleton(); - - bind(RestPutWarmerAction.class).asEagerSingleton(); - bind(RestDeleteWarmerAction.class).asEagerSingleton(); - bind(RestGetWarmerAction.class).asEagerSingleton(); - - bind(RestPutMappingAction.class).asEagerSingleton(); - bind(RestGetMappingAction.class).asEagerSingleton(); - bind(RestGetFieldMappingAction.class).asEagerSingleton(); - - bind(RestRefreshAction.class).asEagerSingleton(); - bind(RestFlushAction.class).asEagerSingleton(); - bind(RestSyncedFlushAction.class).asEagerSingleton(); - bind(RestForceMergeAction.class).asEagerSingleton(); - bind(RestUpgradeAction.class).asEagerSingleton(); - 
bind(RestClearIndicesCacheAction.class).asEagerSingleton(); - - bind(RestIndexAction.class).asEagerSingleton(); - bind(RestGetAction.class).asEagerSingleton(); - bind(RestGetSourceAction.class).asEagerSingleton(); - bind(RestHeadAction.class).asEagerSingleton(); - bind(RestMultiGetAction.class).asEagerSingleton(); - bind(RestDeleteAction.class).asEagerSingleton(); - bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton(); - bind(RestSuggestAction.class).asEagerSingleton(); - bind(RestTermVectorsAction.class).asEagerSingleton(); - bind(RestMultiTermVectorsAction.class).asEagerSingleton(); - bind(RestBulkAction.class).asEagerSingleton(); - bind(RestUpdateAction.class).asEagerSingleton(); - bind(RestPercolateAction.class).asEagerSingleton(); - bind(RestMultiPercolateAction.class).asEagerSingleton(); - - bind(RestSearchAction.class).asEagerSingleton(); - bind(RestSearchScrollAction.class).asEagerSingleton(); - bind(RestClearScrollAction.class).asEagerSingleton(); - bind(RestMultiSearchAction.class).asEagerSingleton(); - bind(RestRenderSearchTemplateAction.class).asEagerSingleton(); - - bind(RestValidateQueryAction.class).asEagerSingleton(); - - bind(RestExplainAction.class).asEagerSingleton(); - - bind(RestRecoveryAction.class).asEagerSingleton(); - - // Templates API - bind(RestGetSearchTemplateAction.class).asEagerSingleton(); - bind(RestPutSearchTemplateAction.class).asEagerSingleton(); - bind(RestDeleteSearchTemplateAction.class).asEagerSingleton(); - - // Scripts API - bind(RestGetIndexedScriptAction.class).asEagerSingleton(); - bind(RestPutIndexedScriptAction.class).asEagerSingleton(); - bind(RestDeleteIndexedScriptAction.class).asEagerSingleton(); - - - bind(RestFieldStatsAction.class).asEagerSingleton(); - - // cat API - Multibinder<AbstractCatAction> catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class); - catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton(); - 
catActionMultibinder.addBinding().to(RestShardsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestMasterAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestNodesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestIndicesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestSegmentsAction.class).asEagerSingleton(); - // Fully qualified to prevent interference with rest.action.count.RestCountAction - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestCountAction.class).asEagerSingleton(); - // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestRecoveryAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestHealthAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestAliasAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestThreadPoolAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestPluginsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestFielddataAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestNodeAttrsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestRepositoriesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestSnapshotAction.class).asEagerSingleton(); - // no abstract cat action - bind(RestCatAction.class).asEagerSingleton(); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index a1cfdb48dd..b7b5064c09 100644 --- 
a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -23,19 +23,27 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; +import java.io.IOException; + /** */ public class RestClusterGetSettingsAction extends BaseRestHandler { + private final ClusterSettings clusterSettings; + @Inject - public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client) { + public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, ClusterSettings clusterSettings) { super(settings, controller, client); + this.clusterSettings = clusterSettings; controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this); } @@ -44,24 +52,34 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() .routingTable(false) .nodes(false); + final boolean renderDefaults = request.paramAsBoolean("defaults", false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); client.admin().cluster().state(clusterStateRequest, new RestBuilderListener<ClusterStateResponse>(channel) { @Override public RestResponse buildResponse(ClusterStateResponse response, 
XContentBuilder builder) throws Exception { - builder.startObject(); + return new BytesRestResponse(RestStatus.OK, renderResponse(response.getState(), renderDefaults, builder, request)); + } + }); + } - builder.startObject("persistent"); - response.getState().metaData().persistentSettings().toXContent(builder, request); - builder.endObject(); + private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); - builder.startObject("transient"); - response.getState().metaData().transientSettings().toXContent(builder, request); - builder.endObject(); + builder.startObject("persistent"); + state.metaData().persistentSettings().toXContent(builder, params); + builder.endObject(); - builder.endObject(); + builder.startObject("transient"); + state.metaData().transientSettings().toXContent(builder, params); + builder.endObject(); - return new BytesRestResponse(RestStatus.OK, builder); - } - }); + if (renderDefaults) { + builder.startObject("defaults"); + clusterSettings.diff(state.metaData().settings(), this.settings).toXContent(builder, params); + builder.endObject(); + } + + builder.endObject(); + return builder; } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java index 9a3f844abb..0b8ffcf94d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java @@ -19,14 +19,14 @@ package org.elasticsearch.rest.action.admin.indices.flush; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import 
org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.flush.IndicesSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; @@ -38,12 +38,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; */ public class RestSyncedFlushAction extends BaseRestHandler { - private final SyncedFlushService syncedFlushService; - @Inject - public RestSyncedFlushAction(Settings settings, RestController controller, Client client, SyncedFlushService syncedFlushService) { + public RestSyncedFlushAction(Settings settings, RestController controller, Client client) { super(settings, controller, client); - this.syncedFlushService = syncedFlushService; controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, "/{index}/_flush/synced", this); @@ -53,12 +50,12 @@ public class RestSyncedFlushAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - String[] indices = Strings.splitStringByCommaToArray(request.param("index")); IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); - - syncedFlushService.attemptSyncedFlush(indices, indicesOptions, new RestBuilderListener<IndicesSyncedFlushResult>(channel) { + SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); + syncedFlushRequest.indicesOptions(indicesOptions); + client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener<SyncedFlushResponse>(channel) { @Override - public RestResponse buildResponse(IndicesSyncedFlushResult results, XContentBuilder 
builder) throws Exception { + public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { builder.startObject(); results.toXContent(builder, request); builder.endObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index 005b30e620..bd7e62abf4 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -88,6 +88,6 @@ public class RestUpdateSettingsAction extends BaseRestHandler { } updateSettingsRequest.settings(updateSettings); - client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<UpdateSettingsResponse>(channel)); + client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index 073a4eb5fa..19bc447888 100644 --- a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -62,7 +62,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { NativeScriptFactory scriptFactory = scripts.get(script); if (scriptFactory != null) { return scriptFactory; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java index 993c95ad79..41befc9406 100644 --- 
a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java @@ -36,7 +36,7 @@ public interface ScriptEngineService extends Closeable { boolean sandboxed(); - Object compile(String script); + Object compile(String script, Map<String, String> params); ExecutableScript executable(CompiledScript compiledScript, @Nullable Map<String, Object> vars); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 3b91f2d311..c9e9f9a873 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -67,6 +67,7 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -96,9 +97,9 @@ public class ScriptService extends AbstractComponent implements Closeable { private final Map<String, ScriptEngineService> scriptEnginesByLang; private final Map<String, ScriptEngineService> scriptEnginesByExt; - private final ConcurrentMap<String, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap<CacheKey, CompiledScript> staticCache = ConcurrentCollections.newConcurrentMap(); - private final Cache<String, CompiledScript> cache; + private final Cache<CacheKey, CompiledScript> cache; private final Path scriptsDirectory; private final ScriptModes scriptModes; @@ -153,7 +154,7 @@ public class ScriptService extends AbstractComponent implements Closeable { this.defaultLang = settings.get(DEFAULT_SCRIPTING_LANGUAGE_SETTING, DEFAULT_LANG); - CacheBuilder<String, CompiledScript> cacheBuilder = CacheBuilder.builder(); + CacheBuilder<CacheKey, CompiledScript> cacheBuilder = CacheBuilder.builder(); if (cacheMaxSize >= 0) { 
cacheBuilder.setMaximumWeight(cacheMaxSize); } @@ -224,7 +225,7 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { + public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map<String, String> params) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -252,14 +253,14 @@ public class ScriptService extends AbstractComponent implements Closeable { " operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are not supported"); } - return compileInternal(script, headersContext); + return compileInternal(script, headersContext, params); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, * without checking if it can be executed based on settings. 
*/ - public CompiledScript compileInternal(Script script, HasContextAndHeaders context) { + public CompiledScript compileInternal(Script script, HasContextAndHeaders context, Map<String, String> params) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -277,7 +278,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); if (type == ScriptType.FILE) { - String cacheKey = getCacheKey(scriptEngineService, name, null); + CacheKey cacheKey = new CacheKey(scriptEngineService, name, null, params); //On disk scripts will be loaded into the staticCache by the listener CompiledScript compiledScript = staticCache.get(cacheKey); @@ -299,14 +300,14 @@ public class ScriptService extends AbstractComponent implements Closeable { code = getScriptFromIndex(indexedScript.lang, indexedScript.id, context); } - String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? null : name, code); + CacheKey cacheKey = new CacheKey(scriptEngineService, type == ScriptType.INLINE ? 
null : name, code, params); CompiledScript compiledScript = cache.get(cacheKey); if (compiledScript == null) { //Either an un-cached inline script or indexed script //If the script type is inline the name will be the same as the code for identification in exceptions try { - compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code)); + compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code, params)); } catch (Exception exception) { throw new ScriptException("Failed to compile " + type + " script [" + name + "] using lang [" + lang + "]", exception); } @@ -364,7 +365,7 @@ public class ScriptService extends AbstractComponent implements Closeable { //we don't know yet what the script will be used for, but if all of the operations for this lang with //indexed scripts are disabled, it makes no sense to even compile it. if (isAnyScriptContextEnabled(scriptLang, scriptEngineService, ScriptType.INDEXED)) { - Object compiled = scriptEngineService.compile(template.getScript()); + Object compiled = scriptEngineService.compile(template.getScript(), Collections.emptyMap()); if (compiled == null) { throw new IllegalArgumentException("Unable to parse [" + template.getScript() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); @@ -436,8 +437,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { - return executable(compile(script, scriptContext, headersContext), script.getParams()); + public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map<String, String> params) { + return executable(compile(script, scriptContext, headersContext, params), script.getParams()); } /** @@ -450,8 +451,8 @@ public class ScriptService 
extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided search script */ - public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { - CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current()); + public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext, Map<String, String> params) { + CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current(), params); return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); } @@ -491,9 +492,9 @@ public class ScriptService extends AbstractComponent implements Closeable { * {@code ScriptEngineService}'s {@code scriptRemoved} method when the * script has been removed from the cache */ - private class ScriptCacheRemovalListener implements RemovalListener<String, CompiledScript> { + private class ScriptCacheRemovalListener implements RemovalListener<CacheKey, CompiledScript> { @Override - public void onRemoval(RemovalNotification<String, CompiledScript> notification) { + public void onRemoval(RemovalNotification<CacheKey, CompiledScript> notification) { scriptMetrics.onCacheEviction(); for (ScriptEngineService service : scriptEngines) { try { @@ -539,8 +540,8 @@ public class ScriptService extends AbstractComponent implements Closeable { logger.info("compiling script file [{}]", file.toAbsolutePath()); try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { String script = Streams.copyToString(reader); - String cacheKey = getCacheKey(engineService, scriptNameExt.v1(), null); - staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script))); + CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); + staticCache.put(cacheKey, new 
CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script, Collections.emptyMap()))); scriptMetrics.onCompilation(); } } else { @@ -565,7 +566,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; logger.info("removing script file [{}]", file.toAbsolutePath()); - staticCache.remove(getCacheKey(engineService, scriptNameExt.v1(), null)); + staticCache.remove(new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap())); } } @@ -625,11 +626,44 @@ public class ScriptService extends AbstractComponent implements Closeable { } } - private static String getCacheKey(ScriptEngineService scriptEngineService, String name, String code) { - String lang = scriptEngineService.types()[0]; - return lang + ":" + (name != null ? ":" + name : "") + (code != null ? ":" + code : ""); + private static final class CacheKey { + final String lang; + final String name; + final String code; + final Map<String, String> params; + + private CacheKey(final ScriptEngineService service, final String name, final String code, final Map<String, String> params) { + this.lang = service.types()[0]; + this.name = name; + this.code = code; + this.params = params; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + CacheKey cacheKey = (CacheKey)o; + + if (!lang.equals(cacheKey.lang)) return false; + if (name != null ? !name.equals(cacheKey.name) : cacheKey.name != null) return false; + if (code != null ? !code.equals(cacheKey.code) : cacheKey.code != null) return false; + return params.equals(cacheKey.params); + + } + + @Override + public int hashCode() { + int result = lang.hashCode(); + result = 31 * result + (name != null ? name.hashCode() : 0); + result = 31 * result + (code != null ? 
code.hashCode() : 0); + result = 31 * result + params.hashCode(); + return result; + } } + private static class IndexedScript { private final String lang; private final String id; diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 9501099997..99f9b0ea0d 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -38,6 +39,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -70,7 +73,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; @@ -83,12 +85,16 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField; import 
org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField; +import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.internal.*; import org.elasticsearch.search.internal.SearchContext.Lifetime; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.*; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -109,9 +115,10 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp public static final String NORMS_LOADING_KEY = "index.norms.loading"; public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive"; public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval"; - public static final String DEFAULT_SEARCH_TIMEOUT = "search.default_search_timeout"; public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); + public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER); + private final ThreadPool threadPool; @@ -150,7 +157,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp private final ParseFieldMatcher parseFieldMatcher; @Inject - public SearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool, + public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, 
DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { super(settings); @@ -184,19 +191,12 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer)); this.indicesWarmer.addListener(new SearchWarmer()); - defaultSearchTimeout = settings.getAsTime(DEFAULT_SEARCH_TIMEOUT, NO_TIMEOUT); - nodeSettingsService.addListener(new SearchSettingsListener()); + defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); } - class SearchSettingsListener implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final TimeValue maybeNewDefaultSearchTimeout = settings.getAsTime(SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout); - if (!maybeNewDefaultSearchTimeout.equals(SearchService.this.defaultSearchTimeout)) { - logger.info("updating [{}] from [{}] to [{}]", SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout, maybeNewDefaultSearchTimeout); - SearchService.this.defaultSearchTimeout = maybeNewDefaultSearchTimeout; - } - } + private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { + this.defaultSearchTimeout = defaultSearchTimeout; } @Override @@ -549,7 +549,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp Engine.Searcher engineSearcher = searcher == null ? 
indexShard.acquireSearcher("search") : searcher; - SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); + DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); SearchContext.setCurrent(context); try { @@ -558,7 +558,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp context.scrollContext().scroll = request.scroll(); } if (request.template() != null) { - ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context); + ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context, Collections.emptyMap()); BytesReference run = (BytesReference) executable.run(); try (XContentParser parser = XContentFactory.xContent(run).createParser(run)) { QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry()); @@ -656,7 +656,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp } } - private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException { + private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException { // nothing to parse... 
if (source == null) { return; @@ -712,6 +712,9 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp if (source.minScore() != null) { context.minimumScore(source.minScore()); } + if (source.profile()) { + context.setProfilers(new Profilers(context.searcher())); + } context.timeoutInMillis(source.timeoutInMillis()); context.terminateAfter(source.terminateAfter()); if (source.aggregations() != null) { @@ -807,19 +810,11 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp fieldDataFieldsContext.setHitExecutionNeeded(true); } if (source.highlighter() != null) { - XContentParser highlighterParser = null; + HighlightBuilder highlightBuilder = source.highlighter(); try { - highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter()); - this.elementParsers.get("highlight").parse(highlighterParser, context); - } catch (Exception e) { - String sSource = "_na_"; - try { - sSource = source.toString(); - } catch (Throwable e1) { - // ignore - } - XContentLocation location = highlighterParser != null ? 
highlighterParser.getTokenLocation() : null; - throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e); + context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext())); + } catch (IOException e) { + throw new SearchContextException(context, "failed to create SearchContextHighlighter", e); } } if (source.innerHits() != null) { @@ -841,7 +836,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp } if (source.scriptFields() != null) { for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { - SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, Collections.emptyMap()); context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure())); } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 1a12751d39..c648436c3a 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import java.io.IOException; @@ -42,8 +42,8 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg } public SearchShardTarget(String nodeId, String index, int shardId) { - this.nodeId = nodeId == null ? 
null : new StringAndBytesText(nodeId); - this.index = new StringAndBytesText(index); + this.nodeId = nodeId == null ? null : new Text(nodeId); + this.index = new Text(index); this.shardId = shardId; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 742f678f6f..0681996e3e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; import org.apache.lucene.search.Query; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; @@ -30,10 +31,13 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.CollectorResult; +import org.elasticsearch.search.profile.InternalProfileCollector; import org.elasticsearch.search.query.QueryPhaseExecutionException; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -81,8 +85,13 @@ public class AggregationPhase implements SearchPhase { } context.aggregations().aggregators(aggregators); if (!collectors.isEmpty()) { - final BucketCollector collector = BucketCollector.wrap(collectors); - collector.preCollection(); + Collector collector = BucketCollector.wrap(collectors); + ((BucketCollector)collector).preCollection(); + if (context.getProfilers() != null) { + collector = 
new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION, + // TODO: report on child aggs as well + Collections.emptyList()); + } context.queryCollectors().put(AggregationPhase.class, collector); } } catch (IOException e) { @@ -116,6 +125,7 @@ public class AggregationPhase implements SearchPhase { BucketCollector globalsCollector = BucketCollector.wrap(globals); Query query = Queries.newMatchAllQuery(); Query searchFilter = context.searchFilter(context.types()); + if (searchFilter != null) { BooleanQuery filtered = new BooleanQuery.Builder() .add(query, Occur.MUST) @@ -124,8 +134,20 @@ public class AggregationPhase implements SearchPhase { query = filtered; } try { + final Collector collector; + if (context.getProfilers() == null) { + collector = globalsCollector; + } else { + InternalProfileCollector profileCollector = new InternalProfileCollector( + globalsCollector, CollectorResult.REASON_AGGREGATION_GLOBAL, + // TODO: report on sub collectors + Collections.emptyList()); + collector = profileCollector; + // start a new profile with this collector + context.getProfilers().addProfiler().setCollector(profileCollector); + } globalsCollector.preCollection(); - context.searcher().search(query, globalsCollector); + context.searcher().search(query, collector); } catch (Exception e) { throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e); } finally { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java index ee38e2b361..c1c1bff1ad 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.Collector; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import 
java.util.stream.StreamSupport; @@ -99,6 +100,11 @@ public abstract class BucketCollector implements Collector { } return false; } + + @Override + public String toString() { + return Arrays.toString(collectors); + } }; } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 6d9a1edc71..0678338fcf 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.children; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.search.*; @@ -64,9 +65,6 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { private final LongObjectPagedHashMap<long[]> parentOrdToOtherBuckets; private boolean multipleBucketsPerParentOrd = false; - // This needs to be a Set to avoid duplicate reader context entries via (#setNextReader(...), it can get invoked multiple times with the same reader context) - private Set<LeafReaderContext> replay = new LinkedHashSet<>(); - public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, String parentType, Query childFilter, Query parentFilter, ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, @@ -99,17 +97,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - if (replay == null) { - throw new IllegalStateException(); - } final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert 
globalOrdinals != null; Scorer parentScorer = parentFilter.scorer(ctx); final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); - if (childFilter.scorer(ctx) != null) { - replay.add(ctx); - } return new LeafBucketCollector() { @Override @@ -138,10 +130,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { @Override protected void doPostCollection() throws IOException { - final Set<LeafReaderContext> replay = this.replay; - this.replay = null; - - for (LeafReaderContext ctx : replay) { + IndexReader indexReader = context().searchContext().searcher().getIndexReader(); + for (LeafReaderContext ctx : indexReader.leaves()) { DocIdSetIterator childDocsIter = childFilter.scorer(ctx); if (childDocsIter == null) { continue; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index da3bc286ff..faca359d76 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -23,7 +23,6 @@ import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -151,7 +150,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (formatter != ValueFormatter.RAW) { - Text keyTxt = new StringText(formatter.format(key)); + Text 
keyTxt = new Text(formatter.format(key)); if (keyed) { builder.startObject(keyTxt.string()); } else { @@ -392,12 +391,14 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter return reducedBuckets; } - private void addEmptyBuckets(List<B> list) { + private void addEmptyBuckets(List<B> list, ReduceContext reduceContext) { B lastBucket = null; ExtendedBounds bounds = emptyBucketInfo.bounds; ListIterator<B> iter = list.listIterator(); // first adding all the empty buckets *before* the actual data (based on th extended_bounds.min the user requested) + InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations), + reduceContext); if (bounds != null) { B firstBucket = iter.hasNext() ? list.get(iter.nextIndex()) : null; if (firstBucket == null) { @@ -405,7 +406,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter long key = bounds.min; long max = bounds.max; while (key <= max) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, + keyed, formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -414,7 +417,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter long key = bounds.min; if (key < firstBucket.key) { while (key < firstBucket.key) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, + keyed, formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -429,7 +434,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter if (lastBucket != null) { long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); while (key < nextBucket.key) { - iter.add(getFactory().createBucket(key, 0, 
emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, keyed, + formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } assert key == nextBucket.key; @@ -442,7 +449,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); long max = bounds.max; while (key <= max) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, keyed, + formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -454,7 +463,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter // adding empty buckets if needed if (minDocCount == 0) { - addEmptyBuckets(reducedBuckets); + addEmptyBuckets(reducedBuckets, reduceContext); } if (order == InternalOrder.KEY_ASC) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index 046ca717b9..b6d1d56d07 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -82,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { @Override public void initialize(InternalAggregation.ReduceContext context) { - searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context); + searchScript = 
context.scriptService().executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap()); searchScript.setNextVar("_subset_freq", subsetDfHolder); searchScript.setNextVar("_subset_size", subsetSizeHolder); searchScript.setNextVar("_superset_freq", supersetDfHolder); @@ -170,7 +171,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } ExecutableScript searchScript; try { - searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context); + searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap()); } catch (Exception e) { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. the script [{}] could not be loaded", e, script, heuristicName); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 26c2eee2f6..c270517cd9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms<LongTerms, LongTerms.Bucket> { @Override public String getKeyAsString() { - return String.valueOf(term); + return formatter.format(term); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index d39a0335ac..00c6b6b49b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import 
java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -91,7 +92,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement vars.putAll(firstAggregation.reduceScript.getParams()); } CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript, - ScriptContext.Standard.AGGS, reduceContext); + ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars); aggregation = script.run(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index 2c1caaa524..6603c6289b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -58,11 +59,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator { this.params = params; ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { - scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run(); + scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()).run(); } - this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS); + this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS, 
Collections.emptyMap()); if (combineScript != null) { - this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext()); + this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()); } else { this.combineScript = null; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java index 32b5d7390d..1efd4a7cd2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.tophits; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder { return sourceBuilder; } - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return sourceBuilder().highlighter(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 789f8c961a..e5ccbf6971 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -90,7 +90,7 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { 
InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation; List<? extends Bucket> buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map<String, Object> vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java index 669a223b21..edc3b4e87c 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -88,7 +89,7 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket> originalAgg = (InternalMultiBucketAggregation<InternalMultiBucketAggregation, InternalMultiBucketAggregation.InternalBucket>) aggregation; List<? 
extends Bucket> buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map<String, Object> vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java index 506c9d16d7..a9dcc77ee9 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java @@ -43,6 +43,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -227,7 +228,7 @@ public class ValuesSourceParser<VS extends ValuesSource> { } private SearchScript createScript() { - return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS); + return input.script == null ? 
null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS, Collections.emptyMap()); } private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) { diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 7963b678fb..3ea2d604b8 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.builder; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -91,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ public static final ParseField RESCORE_FIELD = new ParseField("rescore"); public static final ParseField STATS_FIELD = new ParseField("stats"); public static final ParseField EXT_FIELD = new ParseField("ext"); + public static final ParseField PROFILE_FIELD = new ParseField("profile"); private static final SearchSourceBuilder PROTOTYPE = new SearchSourceBuilder(); @@ -144,7 +146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private List<BytesReference> aggregations; - private BytesReference highlightBuilder; + private HighlightBuilder highlightBuilder; private BytesReference suggestBuilder; @@ -158,6 +160,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private BytesReference ext = null; + private boolean profile = false; + + /** * Constructs a new search source builder. 
*/ @@ -405,22 +410,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * Adds highlight to perform as part of the search. */ public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) { - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - highlightBuilder.innerXContent(builder); - builder.endObject(); - this.highlightBuilder = builder.bytes(); - return this; - } catch (IOException e) { - throw new RuntimeException(e); - } + this.highlightBuilder = highlightBuilder; + return this; } /** - * Gets the bytes representing the hightlighter builder for this request. + * Gets the hightlighter builder for this request. */ - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return highlightBuilder; } @@ -484,6 +481,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } /** + * Should the query be profiled. Defaults to <tt>false</tt> + */ + public SearchSourceBuilder profile(boolean profile) { + this.profile = profile; + return this; + } + + /** + * Return whether to profile query execution, or {@code null} if + * unspecified. + */ + public boolean profile() { + return profile; + } + + /** * Gets the bytes representing the rescore builders for this request. 
*/ public List<BytesReference> rescores() { @@ -731,6 +744,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.fieldNames = fieldNames; } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) { builder.sort(parser.text()); + } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) { + builder.profile = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); @@ -813,8 +828,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } builder.aggregations = aggregations; } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - builder.highlightBuilder = xContentBuilder.bytes(); + builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); builder.innerHitsBuilder = xContentBuilder.bytes(); @@ -940,6 +954,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.field(EXPLAIN_FIELD.getPreferredName(), explain); } + if (profile) { + builder.field("profile", true); + } + if (fetchSourceContext != null) { builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext); } @@ -1012,10 +1030,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } if (highlightBuilder != null) { - builder.field(HIGHLIGHT_FIELD.getPreferredName()); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder); - parser.nextToken(); - builder.copyCurrentStructure(parser); + this.highlightBuilder.toXContent(builder, params); } if 
(innerHitsBuilder != null) { @@ -1158,7 +1173,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ } builder.from = in.readVInt(); if (in.readBoolean()) { - builder.highlightBuilder = in.readBytesReference(); + builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in); } boolean hasIndexBoost = in.readBoolean(); if (hasIndexBoost) { @@ -1224,6 +1239,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (in.readBoolean()) { builder.ext = in.readBytesReference(); } + if (in.getVersion().onOrAfter(Version.V_2_2_0)) { + builder.profile = in.readBoolean(); + } else { + builder.profile = false; + } return builder; } @@ -1259,7 +1279,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ boolean hasHighlightBuilder = highlightBuilder != null; out.writeBoolean(hasHighlightBuilder); if (hasHighlightBuilder) { - out.writeBytesReference(highlightBuilder); + highlightBuilder.writeTo(out); } boolean hasIndexBoost = indexBoost != null; out.writeBoolean(hasIndexBoost); @@ -1337,13 +1357,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (hasExt) { out.writeBytesReference(ext); } + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + out.writeBoolean(profile); + } } @Override public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from, highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, - size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version); + size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile); } @Override @@ -1376,6 +1399,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ && Objects.equals(terminateAfter, other.terminateAfter) && Objects.equals(timeoutInMillis, other.timeoutInMillis) && 
Objects.equals(trackScores, other.trackScores) - && Objects.equals(version, other.version); + && Objects.equals(version, other.version) + && Objects.equals(profile, other.profile); } } diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index f76527163c..835e6e7142 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -43,7 +43,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -52,9 +51,11 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.InternalProfileShardResults; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.profile.ProfileShardResult; import java.io.IOException; import java.util.ArrayList; @@ -410,6 +411,17 @@ public class SearchPhaseController extends AbstractComponent { } } + //Collect profile results + InternalProfileShardResults shardResults = null; + if (!queryResults.isEmpty() && firstResult.profileResults() != null) { + Map<String, 
List<ProfileShardResult>> profileResults = new HashMap<>(queryResults.size()); + for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) { + String key = entry.value.queryResult().shardTarget().toString(); + profileResults.put(key, entry.value.queryResult().profileResults()); + } + shardResults = new InternalProfileShardResults(profileResults); + } + if (aggregations != null) { List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators(); if (pipelineAggregators != null) { @@ -427,7 +439,7 @@ public class SearchPhaseController extends AbstractComponent { InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore); - return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly); + return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly); } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 227141e4dd..04890700be 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -198,7 +198,7 @@ public class FetchPhase implements SearchPhase { DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type()); Text typeText; if (documentMapper == null) { - typeText = new 
StringAndBytesText(fieldsVisitor.uid().type()); + typeText = new Text(fieldsVisitor.uid().type()); } else { typeText = documentMapper.typeText(); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java index 7941e17775..2e76a4c370 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.fetch.innerhits; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent { return this; } - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return sourceBuilder().highlighter(); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java index 6dbdcbd589..de1703b5c9 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -97,9 +98,9 @@ public class ScriptFieldsParseElement implements SearchParseElement { throw new SearchParseException(context, "must specify a script in script fields", parser.getTokenLocation()); } - SearchScript searchScript = 
context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException)); } } } -}
\ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java index d30144f777..b4de465cc7 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java @@ -22,13 +22,19 @@ package org.elasticsearch.search.highlight; import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.highlight.HighlightBuilder.Order; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -74,7 +80,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB protected QueryBuilder<?> highlightQuery; - protected String order; + protected Order order; protected Boolean highlightFilter; @@ -213,18 +219,26 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB /** * The order of fragments per field. By default, ordered by the order in the * highlighted text. Can be <tt>score</tt>, which then it will be ordered - * by score of the fragments. + * by score of the fragments, or <tt>none</TT>. 
*/ - @SuppressWarnings("unchecked") public HB order(String order) { - this.order = order; + return order(Order.fromString(order)); + } + + /** + * By default, fragments of a field are ordered by the order in the highlighted text. + * If set to {@link Order#SCORE}, this changes order to score of the fragments. + */ + @SuppressWarnings("unchecked") + public HB order(Order scoreOrdered) { + this.order = scoreOrdered; return (HB) this; } /** - * @return the value set by {@link #order(String)} + * @return the value set by {@link #order(Order)} */ - public String order() { + public Order order() { return this.order; } @@ -391,7 +405,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB builder.field(HIGHLIGHT_QUERY_FIELD.getPreferredName(), highlightQuery); } if (order != null) { - builder.field(ORDER_FIELD.getPreferredName(), order); + builder.field(ORDER_FIELD.getPreferredName(), order.toString()); } if (highlightFilter != null) { builder.field(HIGHLIGHT_FILTER_FIELD.getPreferredName(), highlightFilter); @@ -419,6 +433,100 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB } } + /** + * Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext} + * in {@link org.elasticsearch.common.xcontent.XContent} format + * + * @param parseContext containing the parser positioned at the structure to be parsed from. 
+ * the state on the parser contained in this context will be changed as a side effect of this + * method call + * @return the new {@link AbstractHighlighterBuilder} + */ + public HB fromXContent(QueryParseContext parseContext) throws IOException { + XContentParser parser = parseContext.parser(); + XContentParser.Token token = parser.currentToken(); + String currentFieldName = null; + HB highlightBuilder = createInstance(parser); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) { + List<String> preTagsList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + preTagsList.add(parser.text()); + } + highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()])); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) { + List<String> postTagsList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + postTagsList.add(parser.text()); + } + highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()])); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName); + } + } else if (token.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) { + highlightBuilder.order(Order.fromString(parser.text())); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) { + highlightBuilder.highlightFilter(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) { + 
highlightBuilder.fragmentSize(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { + highlightBuilder.numOfFragments(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) { + highlightBuilder.requireFieldMatch(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) { + highlightBuilder.boundaryMaxScan(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) { + highlightBuilder.boundaryChars(parser.text().toCharArray()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { + highlightBuilder.highlighterType(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) { + highlightBuilder.fragmenter(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) { + highlightBuilder.noMatchSize(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) { + highlightBuilder.forceSource(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) { + highlightBuilder.phraseLimit(parser.intValue()); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { + if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) { + highlightBuilder.options(parser.map()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) { + highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder()); + } else if (false == 
highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName); + } + } else if (currentFieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName); + } + } + + if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) { + throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set"); + } + return highlightBuilder; + } + + /** + * @param parser the input parser. Implementing classes might advance the parser depending on the + * information they need to instantiate a new instance + * @return a new instance + */ + protected abstract HB createInstance(XContentParser parser) throws IOException; + + /** + * Implementing subclasses can handle parsing special options depending on the + * current token, field name and the parse context. 
+ * @return <tt>true</tt> if an option was found and successfully parsed, otherwise <tt>false</tt> + */ + protected abstract boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, XContentParser.Token endMarkerToken) throws IOException; + @Override public final int hashCode() { return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize, @@ -480,7 +588,9 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB if (in.readBoolean()) { highlightQuery(in.readQuery()); } - order(in.readOptionalString()); + if (in.readBoolean()) { + order(Order.PROTOTYPE.readFrom(in)); + } highlightFilter(in.readOptionalBoolean()); forceSource(in.readOptionalBoolean()); boundaryMaxScan(in.readOptionalVInt()); @@ -511,7 +621,11 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB if (hasQuery) { out.writeQuery(highlightQuery); } - out.writeOptionalString(order); + boolean hasSetOrder = order != null; + out.writeBoolean(hasSetOrder); + if (hasSetOrder) { + order.writeTo(out); + } out.writeOptionalBoolean(highlightFilter); out.writeOptionalBoolean(forceSource); out.writeOptionalVInt(boundaryMaxScan); diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java index 9e86edef47..8ad24b5cb1 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java @@ -82,7 +82,7 @@ public final class CustomQueryScorer extends QueryScorer { } else if (query instanceof FiltersFunctionScoreQuery) { query = ((FiltersFunctionScoreQuery) query).getSubQuery(); extract(query, query.getBoost(), terms); - } else { + } else if (terms.isEmpty()) { extractWeightedTerms(terms, query, query.getBoost()); } } diff --git 
a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java index 65702dd24b..b57899b2e1 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java @@ -33,7 +33,7 @@ import org.apache.lucene.search.vectorhighlight.SimpleFieldFragList; import org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder; import org.apache.lucene.search.vectorhighlight.SingleFragListBuilder; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -159,7 +159,7 @@ public class FastVectorHighlighter implements Highlighter { } if (fragments != null && fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize(); @@ -170,7 +170,7 @@ public class FastVectorHighlighter implements Highlighter { fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().names().indexName(), fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder); if (fragments != null && fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java 
b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java index e45303ccb5..c0b1aeea3b 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java @@ -30,11 +30,13 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions.Builder; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -42,6 +44,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Objects; import java.util.Set; @@ -230,117 +233,45 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde } /** - * Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext} - * in {@link org.elasticsearch.common.xcontent.XContent} format - * - * @param parseContext - * the input parse context. 
The state on the parser contained in - * this context will be changed as a side effect of this method - * call - * @return the new {@link HighlightBuilder} + * parse options only present in top level highlight builder (`tags_schema`, `encoder` and nested `fields`) */ - public static HighlightBuilder fromXContent(QueryParseContext parseContext) throws IOException { + @Override + protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, Token currentToken) throws IOException { XContentParser parser = parseContext.parser(); XContentParser.Token token; - String topLevelFieldName = null; - - HighlightBuilder highlightBuilder = new HighlightBuilder(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - topLevelFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, PRE_TAGS_FIELD)) { - List<String> preTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - preTagsList.add(parser.text()); - } - highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, POST_TAGS_FIELD)) { - List<String> postTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - postTagsList.add(parser.text()); - } - highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) { - highlightBuilder.useExplicitFieldOrder(true); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.START_OBJECT) { - String highlightFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) 
{ - if (highlightFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field"); - } - highlightFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext)); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field"); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", topLevelFieldName); - } - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, ORDER_FIELD)) { - highlightBuilder.order(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TAGS_SCHEMA_FIELD)) { - highlightBuilder.tagsSchema(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_FILTER_FIELD)) { - highlightBuilder.highlightFilter(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENT_SIZE_FIELD)) { - highlightBuilder.fragmentSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { - highlightBuilder.numOfFragments(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, ENCODER_FIELD)) { - highlightBuilder.encoder(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, REQUIRE_FIELD_MATCH_FIELD)) { - highlightBuilder.requireFieldMatch(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_MAX_SCAN_FIELD)) { - highlightBuilder.boundaryMaxScan(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_CHARS_FIELD)) { - 
highlightBuilder.boundaryChars(parser.text().toCharArray()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TYPE_FIELD)) { - highlightBuilder.highlighterType(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENTER_FIELD)) { - highlightBuilder.fragmenter(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NO_MATCH_SIZE_FIELD)) { - highlightBuilder.noMatchSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FORCE_SOURCE_FIELD)) { - highlightBuilder.forceSource(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, PHRASE_LIMIT_FIELD)) { - highlightBuilder.phraseLimit(parser.intValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", topLevelFieldName); - } - } else if (token == XContentParser.Token.START_OBJECT && topLevelFieldName != null) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, OPTIONS_FIELD)) { - highlightBuilder.options(parser.map()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) { - String highlightFieldName = null; + boolean foundCurrentFieldMatch = false; + if (currentToken.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, TAGS_SCHEMA_FIELD)) { + tagsSchema(parser.text()); + foundCurrentFieldMatch = true; + } else if (parseContext.parseFieldMatcher().match(currentFieldName, ENCODER_FIELD)) { + encoder(parser.text()); + foundCurrentFieldMatch = true; + } + } else if (currentToken == Token.START_ARRAY && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + useExplicitFieldOrder(true); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == 
XContentParser.Token.FIELD_NAME) { - highlightFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext)); + field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext)); } } - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_QUERY_FIELD)) { - highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder()); + foundCurrentFieldMatch = true; } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", topLevelFieldName); + throw new ParsingException(parser.getTokenLocation(), + "If highlighter fields is an array it must contain objects containing a single field"); } - } else if (topLevelFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, topLevelFieldName); } + } else if (currentToken == Token.START_OBJECT && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext)); + } + } + foundCurrentFieldMatch = true; } - - if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) { - throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set"); - } - return highlightBuilder; + return foundCurrentFieldMatch; } public SearchContextHighlight build(QueryShardContext context) throws IOException { @@ -378,9 +309,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde */ @SuppressWarnings({ "rawtypes", "unchecked" }) private static void transferOptions(AbstractHighlighterBuilder highlighterBuilder, SearchContextHighlight.FieldOptions.Builder targetOptionsBuilder, QueryShardContext context) throws 
IOException { - targetOptionsBuilder.preTags(highlighterBuilder.preTags); - targetOptionsBuilder.postTags(highlighterBuilder.postTags); - targetOptionsBuilder.scoreOrdered("score".equals(highlighterBuilder.order)); + if (highlighterBuilder.preTags != null) { + targetOptionsBuilder.preTags(highlighterBuilder.preTags); + } + if (highlighterBuilder.postTags != null) { + targetOptionsBuilder.postTags(highlighterBuilder.postTags); + } + if (highlighterBuilder.order != null) { + targetOptionsBuilder.scoreOrdered(highlighterBuilder.order == Order.SCORE); + } if (highlighterBuilder.highlightFilter != null) { targetOptionsBuilder.highlightFilter(highlighterBuilder.highlightFilter); } @@ -396,9 +333,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde if (highlighterBuilder.boundaryMaxScan != null) { targetOptionsBuilder.boundaryMaxScan(highlighterBuilder.boundaryMaxScan); } - targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars)); - targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType); - targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter); + if (highlighterBuilder.boundaryChars != null) { + targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars)); + } + if (highlighterBuilder.highlighterType != null) { + targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType); + } + if (highlighterBuilder.fragmenter != null) { + targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter); + } if (highlighterBuilder.noMatchSize != null) { targetOptionsBuilder.noMatchSize(highlighterBuilder.noMatchSize); } @@ -408,7 +351,9 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde if (highlighterBuilder.phraseLimit != null) { targetOptionsBuilder.phraseLimit(highlighterBuilder.phraseLimit); } - targetOptionsBuilder.options(highlighterBuilder.options); + if (highlighterBuilder.options != null) { + 
targetOptionsBuilder.options(highlighterBuilder.options); + } if (highlighterBuilder.highlightQuery != null) { targetOptionsBuilder.highlightQuery(highlighterBuilder.highlightQuery.toQuery(context)); } @@ -468,6 +413,11 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde } @Override + protected HighlightBuilder createInstance(XContentParser parser) { + return new HighlightBuilder(); + } + + @Override protected int doHashCode() { return Objects.hash(encoder, useExplicitFieldOrder, fields); } @@ -549,80 +499,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde builder.endObject(); } - private static HighlightBuilder.Field fromXContent(String fieldname, QueryParseContext parseContext) throws IOException { + /** + * parse options only present in field highlight builder (`fragment_offset`, `matched_fields`) + */ + @Override + protected boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, Token currentToken) throws IOException { XContentParser parser = parseContext.parser(); - XContentParser.Token token; - - final HighlightBuilder.Field field = new HighlightBuilder.Field(fieldname); - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) { - List<String> preTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - preTagsList.add(parser.text()); - } - field.preTags(preTagsList.toArray(new String[preTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) { - List<String> postTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - 
postTagsList.add(parser.text()); - } - field.postTags(postTagsList.toArray(new String[postTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)) { - List<String> matchedFields = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - matchedFields.add(parser.text()); - } - field.matchedFields(matchedFields.toArray(new String[matchedFields.size()])); - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName); - } - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) { - field.fragmentSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { - field.numOfFragments(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD)) { - field.fragmentOffset(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) { - field.highlightFilter(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) { - field.order(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) { - field.requireFieldMatch(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) { - field.boundaryMaxScan(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) { - field.boundaryChars(parser.text().toCharArray()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { - field.highlighterType(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) { - field.fragmenter(parser.text()); - } else if 
(parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) { - field.noMatchSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) { - field.forceSource(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) { - field.phraseLimit(parser.intValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName); - } - } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { - if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) { - field.highlightQuery(parseContext.parseInnerQueryBuilder()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) { - field.options(parser.map()); - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName); - } - } else if (currentFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName); + boolean foundCurrentFieldMatch = false; + if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD) && currentToken.isValue()) { + fragmentOffset(parser.intValue()); + foundCurrentFieldMatch = true; + } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD) + && currentToken == XContentParser.Token.START_ARRAY) { + List<String> matchedFields = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + matchedFields.add(parser.text()); } + matchedFields(matchedFields.toArray(new String[matchedFields.size()])); + foundCurrentFieldMatch = true; + } + return foundCurrentFieldMatch; + } + + @Override + protected Field createInstance(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + String 
fieldname = parser.currentName(); + return new Field(fieldname); + } else { + throw new ParsingException(parser.getTokenLocation(), "unknown token type [{}], expected field name", parser.currentToken()); } - return field; } @Override @@ -654,4 +560,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde writeOptionsTo(out); } } + + public enum Order implements Writeable<Order> { + NONE, SCORE; + + static Order PROTOTYPE = NONE; + + @Override + public Order readFrom(StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown Order ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(this.ordinal()); + } + + public static Order fromString(String order) { + if (order.toUpperCase(Locale.ROOT).equals(SCORE.name())) { + return Order.SCORE; + } + return NONE; + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java index 9077278d51..30530b697f 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import java.io.IOException; @@ -90,7 +89,7 @@ public class HighlightField implements Streamable { if (in.readBoolean()) { int size = in.readVInt(); if (size == 0) { - fragments = StringText.EMPTY_ARRAY; + fragments = 
Text.EMPTY_ARRAY; } else { fragments = new Text[size]; for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java index 041ed754d7..5f4cdddb06 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java @@ -33,9 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.apache.lucene.search.highlight.TextFragment; import org.apache.lucene.util.BytesRefHash; import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; @@ -158,7 +156,7 @@ public class PlainHighlighter implements Highlighter { } if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize(); @@ -172,7 +170,7 @@ public class PlainHighlighter implements Highlighter { throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e); } if (end > 0) { - return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) }); + return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) }); } } return null; diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java 
b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index e11840e89e..2509f95da5 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator; import org.apache.lucene.search.postingshighlight.Snippet; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter { } if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } return null; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 0a9b860edb..a7bacb64d9 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -26,6 +26,9 @@ import org.apache.lucene.search.*; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.profile.ProfileBreakdown; +import org.elasticsearch.search.profile.ProfileWeight; +import org.elasticsearch.search.profile.Profiler; import java.io.IOException; @@ -43,26 +46,44 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { 
private final Engine.Searcher engineSearcher; - public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) { + // TODO revisit moving the profiler to inheritance or wrapping model in the future + private Profiler profiler; + + public ContextIndexSearcher(Engine.Searcher searcher, + QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) { super(searcher.reader()); in = searcher.searcher(); engineSearcher = searcher; setSimilarity(searcher.searcher().getSimilarity(true)); - setQueryCache(searchContext.getQueryCache()); - setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy()); + setQueryCache(queryCache); + setQueryCachingPolicy(queryCachingPolicy); } @Override public void close() { } + public void setProfiler(Profiler profiler) { + this.profiler = profiler; + } + public void setAggregatedDfs(AggregatedDfs aggregatedDfs) { this.aggregatedDfs = aggregatedDfs; } @Override public Query rewrite(Query original) throws IOException { - return in.rewrite(original); + if (profiler != null) { + profiler.startRewriteTime(); + } + + try { + return in.rewrite(original); + } finally { + if (profiler != null) { + profiler.stopAndAddRewriteTime(); + } + } } @Override @@ -72,8 +93,34 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { if (aggregatedDfs != null && needsScores) { // if scores are needed and we have dfs data then use it return super.createNormalizedWeight(query, needsScores); + } else if (profiler != null) { + // we need to use the createWeight method to insert the wrappers + return super.createNormalizedWeight(query, needsScores); + } else { + return in.createNormalizedWeight(query, needsScores); + } + } + + @Override + public Weight createWeight(Query query, boolean needsScores) throws IOException { + if (profiler != null) { + // createWeight() is called for each query in the tree, so we tell the queryProfiler + // each invocation so that it can build an internal representation of the 
query + // tree + ProfileBreakdown profile = profiler.getQueryBreakdown(query); + profile.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT); + final Weight weight; + try { + weight = super.createWeight(query, needsScores); + } finally { + profile.stopAndRecordTime(); + profiler.pollLastQuery(); + } + return new ProfileWeight(query, weight, profile); + } else { + // needs to be 'super', not 'in' in order to use aggregated DFS + return super.createWeight(query, needsScores); } - return in.createNormalizedWeight(query, needsScores); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 1174fcdd8a..2d3f659062 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -58,6 +58,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.query.QueryPhaseExecutionException; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -129,10 +131,10 @@ public class DefaultSearchContext extends SearchContext { private List<RescoreSearchContext> rescore; private SearchLookup searchLookup; private volatile long keepAlive; - private ScoreDoc lastEmittedDoc; private final long originNanoTime = System.nanoTime(); private volatile long lastAccessTime = -1; private InnerHitsContext innerHitsContext; + private Profilers profilers; private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>(); private final Map<Class<?>, 
Collector> queryCollectors = new HashMap<>(); @@ -158,7 +160,7 @@ public class DefaultSearchContext extends SearchContext { this.fetchResult = new FetchSearchResult(id, shardTarget); this.indexShard = indexShard; this.indexService = indexService; - this.searcher = new ContextIndexSearcher(this, engineSearcher); + this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; this.timeoutInMillis = timeout.millis(); } @@ -724,5 +726,11 @@ public class DefaultSearchContext extends SearchContext { } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Profilers getProfilers() { + return profilers; + } + + public void setProfilers(Profilers profilers) { + this.profilers = profilers; + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 7225c7b32b..1f04d01340 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -49,6 +48,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; +import 
org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -517,8 +517,11 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();} + public Profilers getProfilers() { + return in.getProfilers(); + } @Override - public QueryCache getQueryCache() { return in.getQueryCache();} + public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();} + } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index 96fd103fa6..fcac5b1cc8 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -104,14 +104,14 @@ public class InternalSearchHit implements SearchHit { public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) { this.docId = docId; - this.id = new StringAndBytesText(id); + this.id = new Text(id); this.type = type; this.fields = fields; } public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map<String, SearchHitField> fields) { this.docId = nestedTopDocId; - this.id = new 
StringAndBytesText(id); + this.id = new Text(id); this.type = type; this.nestedIdentity = nestedIdentity; this.fields = fields; @@ -339,7 +339,7 @@ public class InternalSearchHit implements SearchHit { if (sortValues != null) { for (int i = 0; i < sortValues.length; i++) { if (sortValues[i] instanceof BytesRef) { - sortValuesCopy[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i])); + sortValuesCopy[i] = new Text(new BytesArray((BytesRef) sortValues[i])); } } } @@ -783,7 +783,7 @@ public class InternalSearchHit implements SearchHit { private InternalNestedIdentity child; public InternalNestedIdentity(String field, int offset, InternalNestedIdentity child) { - this.field = new StringAndBytesText(field); + this.field = new Text(field); this.offset = offset; this.child = child; } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 7b73772f9d..b8255e0bb5 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -28,9 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.profile.InternalProfileShardResults; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits; @@ -40,7 +45,7 @@ import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHit public class InternalSearchResponse implements Streamable, ToXContent { public static 
InternalSearchResponse empty() { - return new InternalSearchResponse(InternalSearchHits.empty(), null, null, false, null); + return new InternalSearchResponse(InternalSearchHits.empty(), null, null, null, false, null); } private InternalSearchHits hits; @@ -49,6 +54,8 @@ public class InternalSearchResponse implements Streamable, ToXContent { private Suggest suggest; + private InternalProfileShardResults profileResults; + private boolean timedOut; private Boolean terminatedEarly = null; @@ -56,10 +63,12 @@ public class InternalSearchResponse implements Streamable, ToXContent { private InternalSearchResponse() { } - public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly) { + public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, + InternalProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) { this.hits = hits; this.aggregations = aggregations; this.suggest = suggest; + this.profileResults = profileResults; this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; } @@ -84,6 +93,19 @@ public class InternalSearchResponse implements Streamable, ToXContent { return suggest; } + /** + * Returns the profile results for this search response (including all shards). 
+ * An empty map is returned if profiling was not enabled + * + * @return Profile results + */ + public Map<String, List<ProfileShardResult>> profile() { + if (profileResults == null) { + return Collections.emptyMap(); + } + return profileResults.getShardResults(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { hits.toXContent(builder, params); @@ -93,6 +115,9 @@ public class InternalSearchResponse implements Streamable, ToXContent { if (suggest != null) { suggest.toXContent(builder, params); } + if (profileResults != null) { + profileResults.toXContent(builder, params); + } return builder; } @@ -114,6 +139,12 @@ public class InternalSearchResponse implements Streamable, ToXContent { timedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { + profileResults = new InternalProfileShardResults(in); + } else { + profileResults = null; + } } @Override @@ -134,5 +165,14 @@ public class InternalSearchResponse implements Streamable, ToXContent { out.writeBoolean(timedOut); out.writeOptionalBoolean(terminatedEarly); + + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + if (profileResults == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + profileResults.writeTo(out); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 0f61b2bc6a..4e4e9dd5dd 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import 
org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -56,6 +55,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -304,6 +304,11 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple public abstract FetchSearchResult fetchResult(); /** + * Return a handle over the profilers for the current search request, or {@code null} if profiling is not enabled. + */ + public abstract Profilers getProfilers(); + + /** * Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object * is function of the provided {@link Lifetime}. 
*/ @@ -367,5 +372,4 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple CONTEXT } - public abstract QueryCache getQueryCache(); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 47791aeddf..9d15dfd579 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -71,6 +71,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S private Boolean requestCache; private long nowInMillis; + private boolean profile; + ShardSearchLocalRequest() { } @@ -165,6 +167,16 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S return scroll; } + @Override + public void setProfile(boolean profile) { + this.profile = profile; + } + + @Override + public boolean isProfile() { + return profile; + } + @SuppressWarnings("unchecked") protected void innerReadFrom(StreamInput in) throws IOException { index = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index fb631b0827..b1730b6a14 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -60,6 +60,17 @@ public interface ShardSearchRequest extends HasContextAndHeaders { Scroll scroll(); /** + * Sets if this shard search needs to be profiled or not + * @param profile True if the shard should be profiled + */ + void setProfile(boolean profile); + + /** + * Returns true if this shard search is being profiled or not + */ + boolean isProfile(); + + /** * Returns the cache key for this shard search request, based on its content */ BytesReference 
cacheKey() throws IOException; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 279d9d6bd2..0f9c0ced41 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -150,4 +150,14 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha public BytesReference cacheKey() throws IOException { return shardSearchLocalRequest.cacheKey(); } + + @Override + public void setProfile(boolean profile) { + shardSearchLocalRequest.setProfile(profile); + } + + @Override + public boolean isProfile() { + return shardSearchLocalRequest.isProfile(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java new file mode 100644 index 0000000000..4949c6388d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Public interface and serialization container for profiled timings of the + * Collectors used in the search. Children CollectorResult's may be + * embedded inside of a parent CollectorResult + */ +public class CollectorResult implements ToXContent, Writeable { + + public static final String REASON_SEARCH_COUNT = "search_count"; + public static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; + public static final String REASON_SEARCH_TERMINATE_AFTER_COUNT = "search_terminate_after_count"; + public static final String REASON_SEARCH_POST_FILTER = "search_post_filter"; + public static final String REASON_SEARCH_MIN_SCORE = "search_min_score"; + public static final String REASON_SEARCH_MULTI = "search_multi"; + public static final String REASON_SEARCH_TIMEOUT = "search_timeout"; + public static final String REASON_AGGREGATION = "aggregation"; + public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global"; + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField REASON = new ParseField("reason"); + private static final ParseField TIME = new ParseField("time"); + private static final ParseField CHILDREN = new ParseField("children"); + + /** + * A more friendly representation of the Collector's class name + */ + private final String collectorName; + + /** + * A "hint" to help provide some context about this Collector + */ + private final String reason; + + /** + * The total elapsed time for this Collector + */ + 
private final Long time; + + /** + * A list of children collectors "embedded" inside this collector + */ + private List<CollectorResult> children; + + public CollectorResult(String collectorName, String reason, Long time, List<CollectorResult> children) { + this.collectorName = collectorName; + this.reason = reason; + this.time = time; + this.children = children; + } + + public CollectorResult(StreamInput in) throws IOException { + this.collectorName = in.readString(); + this.reason = in.readString(); + this.time = in.readLong(); + int size = in.readVInt(); + this.children = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + CollectorResult child = new CollectorResult(in); + this.children.add(child); + } + } + + /** + * @return the profiled time for this collector (inclusive of children) + */ + public long getTime() { + return this.time; + } + + /** + * @return a human readable "hint" about what this collector was used for + */ + public String getReason() { + return this.reason; + } + + /** + * @return the lucene class name of the collector + */ + public String getName() { + return this.collectorName; + } + + /** + * @return a list of children collectors + */ + public List<CollectorResult> getProfiledChildren() { + return children; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder = builder.startObject() + .field(NAME.getPreferredName(), toString()) + .field(REASON.getPreferredName(), reason) + .field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0))); + + if (!children.isEmpty()) { + builder = builder.startArray(CHILDREN.getPreferredName()); + for (CollectorResult child : children) { + builder = child.toXContent(builder, params); + } + builder = builder.endArray(); + } + builder = builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + 
out.writeString(collectorName); + out.writeString(reason); + out.writeLong(time); + out.writeVInt(children.size()); + for (CollectorResult child : children) { + child.writeTo(out); + } + } + + @Override + public Object readFrom(StreamInput in) throws IOException { + return new CollectorResult(in); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java new file mode 100644 index 0000000000..132731f37c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.LeafCollector; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * This class wraps a Lucene Collector and times the execution of: + * - setScorer() + * - collect() + * - doSetNextReader() + * - needsScores() + * + * InternalProfiler facilitates the linking of the the Collector graph + */ +public class InternalProfileCollector implements Collector { + + /** + * A more friendly representation of the Collector's class name + */ + private final String collectorName; + + /** + * A "hint" to help provide some context about this Collector + */ + private final String reason; + + /** The wrapped collector */ + private final ProfileCollector collector; + + /** + * A list of "embedded" children collectors + */ + private final List<InternalProfileCollector> children; + + public InternalProfileCollector(Collector collector, String reason, List<InternalProfileCollector> children) { + this.collector = new ProfileCollector(collector); + this.reason = reason; + this.collectorName = deriveCollectorName(collector); + this.children = children; + } + + /** + * @return the profiled time for this collector (inclusive of children) + */ + public long getTime() { + return collector.getTime(); + } + + /** + * @return a human readable "hint" about what this collector was used for + */ + public String getReason() { + return this.reason; + } + + /** + * @return the lucene class name of the collector + */ + public String getName() { + return this.collectorName; + } + + /** + * Creates a human-friendly representation of the Collector name. + * + * Bucket Collectors use the aggregation name in their toString() method, + * which makes the profiled output a bit nicer. 
+ * + * @param c The Collector to derive a name from + * @return A (hopefully) prettier name + */ + private String deriveCollectorName(Collector c) { + String s = c.getClass().getSimpleName(); + + // MutiCollector which wraps multiple BucketCollectors is generated + // via an anonymous class, so this corrects the lack of a name by + // asking the enclosingClass + if (s.equals("")) { + s = c.getClass().getEnclosingClass().getSimpleName(); + } + + // Aggregation collector toString()'s include the user-defined agg name + if (reason.equals(CollectorResult.REASON_AGGREGATION) || reason.equals(CollectorResult.REASON_AGGREGATION_GLOBAL)) { + s += ": [" + c.toString() + "]"; + } + return s; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + return collector.getLeafCollector(context); + } + + @Override + public boolean needsScores() { + return collector.needsScores(); + } + + public CollectorResult getCollectorTree() { + return InternalProfileCollector.doGetCollectorTree(this); + } + + private static CollectorResult doGetCollectorTree(InternalProfileCollector collector) { + List<CollectorResult> childResults = new ArrayList<>(collector.children.size()); + for (InternalProfileCollector child : collector.children) { + CollectorResult result = doGetCollectorTree(child); + childResults.add(result); + } + return new CollectorResult(collector.getName(), collector.getReason(), collector.getTime(), childResults); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java new file mode 100644 index 0000000000..e6052ff509 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java @@ -0,0 +1,108 @@ +package org.elasticsearch.search.profile; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; + +/** + * A container class to hold all the profile results across all shards. 
Internally + * holds a map of shard ID -> Profiled results + */ +public final class InternalProfileShardResults implements Writeable<InternalProfileShardResults>, ToXContent{ + + private Map<String, List<ProfileShardResult>> shardResults; + + public InternalProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) { + Map<String, List<ProfileShardResult>> transformed = + shardResults.entrySet() + .stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + e -> Collections.unmodifiableList(e.getValue())) + ); + this.shardResults = Collections.unmodifiableMap(transformed); + } + + public InternalProfileShardResults(StreamInput in) throws IOException { + int size = in.readInt(); + shardResults = new HashMap<>(size); + + for (int i = 0; i < size; i++) { + String key = in.readString(); + int shardResultsSize = in.readInt(); + + List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize); + + for (int j = 0; j < shardResultsSize; j++) { + ProfileShardResult result = new ProfileShardResult(in); + shardResult.add(result); + } + shardResults.put(key, shardResult); + } + } + + public Map<String, List<ProfileShardResult>> getShardResults() { + return this.shardResults; + } + + @Override + public InternalProfileShardResults readFrom(StreamInput in) throws IOException { + return new InternalProfileShardResults(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(shardResults.size()); + for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) { + out.writeString(entry.getKey()); + out.writeInt(entry.getValue().size()); + + for (ProfileShardResult result : entry.getValue()) { + result.writeTo(out); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("profile").startArray("shards"); + + for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) { + 
builder.startObject().field("id",entry.getKey()).startArray("searches"); + for (ProfileShardResult result : entry.getValue()) { + builder.startObject(); + result.toXContent(builder, params); + builder.endObject(); + } + builder.endArray().endObject(); + } + + builder.endArray().endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java new file mode 100644 index 0000000000..4bc8a85a78 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java @@ -0,0 +1,235 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.Query; + +import java.util.*; +import java.util.concurrent.LinkedBlockingDeque; + +/** + * This class tracks the dependency tree for queries (scoring and rewriting) and + * generates {@link ProfileBreakdown} for each node in the tree. 
It also finalizes the tree + * and returns a list of {@link ProfileResult} that can be serialized back to the client + */ +final class InternalProfileTree { + + private ArrayList<ProfileBreakdown> timings; + + /** Maps the Query to it's list of children. This is basically the dependency tree */ + private ArrayList<ArrayList<Integer>> tree; + + /** A list of the original queries, keyed by index position */ + private ArrayList<Query> queries; + + /** A list of top-level "roots". Each root can have its own tree of profiles */ + private ArrayList<Integer> roots; + + /** Rewrite time */ + private long rewriteTime; + private long rewriteScratch; + + /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */ + private Deque<Integer> stack; + + private int currentToken = 0; + + public InternalProfileTree() { + timings = new ArrayList<>(10); + stack = new LinkedBlockingDeque<>(10); + tree = new ArrayList<>(10); + queries = new ArrayList<>(10); + roots = new ArrayList<>(10); + } + + /** + * Returns a {@link ProfileBreakdown} for a scoring query. Scoring queries (e.g. those + * that are past the rewrite phase and are now being wrapped by createWeight() ) follow + * a recursive progression. We can track the dependency tree by a simple stack + * + * The only hiccup is that the first scoring query will be identical to the last rewritten + * query, so we need to take special care to fix that + * + * @param query The scoring query we wish to profile + * @return A ProfileBreakdown for this query + */ + public ProfileBreakdown getQueryBreakdown(Query query) { + int token = currentToken; + + boolean stackEmpty = stack.isEmpty(); + + // If the stack is empty, we are a new root query + if (stackEmpty) { + + // We couldn't find a rewritten query to attach to, so just add it as a + // top-level root. This is just a precaution: it really shouldn't happen. + // We would only get here if a top-level query that never rewrites for some reason. 
+ roots.add(token); + + // Increment the token since we are adding a new node, but notably, do not + // updateParent() because this was added as a root + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + updateParent(token); + + // Increment the token since we are adding a new node + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + /** + * Begin timing a query for a specific Timing context + */ + public void startRewriteTime() { + assert rewriteScratch == 0; + rewriteScratch = System.nanoTime(); + } + + /** + * Halt the timing process and add the elapsed rewriting time. + * startRewriteTime() must be called for a particular context prior to calling + * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and + * nonsensical + * + * @return The elapsed time + */ + public long stopAndAddRewriteTime() { + long time = Math.max(1, System.nanoTime() - rewriteScratch); + rewriteTime += time; + rewriteScratch = 0; + return time; + } + + /** + * Helper method to add a new node to the dependency tree. + * + * Initializes a new list in the dependency tree, saves the query and + * generates a new {@link ProfileBreakdown} to track the timings + * of this query + * + * @param query The query to profile + * @param token The assigned token for this query + * @return A ProfileBreakdown to profile this query + */ + private ProfileBreakdown addDependencyNode(Query query, int token) { + + // Add a new slot in the dependency tree + tree.add(new ArrayList<>(5)); + + // Save our query for lookup later + queries.add(query); + + ProfileBreakdown queryTimings = new ProfileBreakdown(); + timings.add(token, queryTimings); + return queryTimings; + } + + /** + * Removes the last (e.g. 
most recent) value on the stack + */ + public void pollLast() { + stack.pollLast(); + } + + /** + * After the query has been run and profiled, we need to merge the flat timing map + * with the dependency graph to build a data structure that mirrors the original + * query tree + * + * @return a hierarchical representation of the profiled query tree + */ + public List<ProfileResult> getQueryTree() { + ArrayList<ProfileResult> results = new ArrayList<>(5); + for (Integer root : roots) { + results.add(doGetQueryTree(root)); + } + return results; + } + + /** + * Recursive helper to finalize a node in the dependency tree + * @param token The node we are currently finalizing + * @return A hierarchical representation of the tree inclusive of children at this level + */ + private ProfileResult doGetQueryTree(int token) { + Query query = queries.get(token); + ProfileBreakdown breakdown = timings.get(token); + Map<String, Long> timings = breakdown.toTimingMap(); + List<Integer> children = tree.get(token); + List<ProfileResult> childrenProfileResults = Collections.emptyList(); + + if (children != null) { + childrenProfileResults = new ArrayList<>(children.size()); + for (Integer child : children) { + ProfileResult childNode = doGetQueryTree(child); + childrenProfileResults.add(childNode); + } + } + + // TODO this would be better done bottom-up instead of top-down to avoid + // calculating the same times over and over...but worth the effort? 
+ long nodeTime = getNodeTime(timings, childrenProfileResults); + String queryDescription = query.getClass().getSimpleName(); + String luceneName = query.toString(); + return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime); + } + + public long getRewriteTime() { + return rewriteTime; + } + + /** + * Internal helper to add a child to the current parent node + * + * @param childToken The child to add to the current parent + */ + private void updateParent(int childToken) { + Integer parent = stack.peekLast(); + ArrayList<Integer> parentNode = tree.get(parent); + parentNode.add(childToken); + tree.set(parent, parentNode); + } + + /** + * Internal helper to calculate the time of a node, inclusive of children + * + * @param timings A map of breakdown timing for the node + * @param children All children profile results at this node + * @return The total time at this node, inclusive of children + */ + private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) { + long nodeTime = 0; + for (long time : timings.values()) { + nodeTime += time; + } + + // Then add up our children + for (ProfileResult child : children) { + nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren()); + } + return nodeTime; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java new file mode 100644 index 0000000000..55ad77b693 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * A record of timings for the various operations that may happen during query execution. + * A node's time may be composed of several internal attributes (rewriting, weighting, + * scoring, etc). + */ +public final class ProfileBreakdown { + + /** Enumeration of all supported timing types. */ + public enum TimingType { + CREATE_WEIGHT, + BUILD_SCORER, + NEXT_DOC, + ADVANCE, + MATCH, + SCORE; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + /** + * The accumulated timings for this query node + */ + private final long[] timings; + + /** Scrach to store the current timing type. */ + private TimingType currentTimingType; + + /** + * The temporary scratch space for holding start-times + */ + private long scratch; + + /** Sole constructor. */ + public ProfileBreakdown() { + timings = new long[TimingType.values().length]; + } + + /** + * Begin timing a query for a specific Timing context + * @param timing The timing context being profiled + */ + public void startTime(TimingType timing) { + assert currentTimingType == null; + assert scratch == 0; + currentTimingType = timing; + scratch = System.nanoTime(); + } + + /** + * Halt the timing process and save the elapsed time. 
+ * startTime() must be called for a particular context prior to calling + * stopAndRecordTime(), otherwise the elapsed time will be negative and + * nonsensical + * + * @return The elapsed time + */ + public long stopAndRecordTime() { + long time = Math.max(1, System.nanoTime() - scratch); + timings[currentTimingType.ordinal()] += time; + currentTimingType = null; + scratch = 0L; + return time; + } + + /** Convert this record to a map from {@link TimingType} to times. */ + public Map<String, Long> toTimingMap() { + Map<String, Long> map = new HashMap<>(); + for (TimingType timingType : TimingType.values()) { + map.put(timingType.toString(), timings[timingType.ordinal()]); + } + return Collections.unmodifiableMap(map); + } + + /** + * Add <code>other</code>'s timings into this breakdown + * @param other Another Breakdown to merge with this one + */ + public void merge(ProfileBreakdown other) { + assert(timings.length == other.timings.length); + for (int i = 0; i < timings.length; ++i) { + timings[i] += other.timings[i]; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java new file mode 100644 index 0000000000..7d7538c911 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorer; + +import java.io.IOException; + +/** A collector that profiles how much time is spent calling it. */ +final class ProfileCollector extends FilterCollector { + + private long time; + + /** Sole constructor. */ + public ProfileCollector(Collector in) { + super(in); + } + + /** Return the wrapped collector. 
*/ + public Collector getDelegate() { + return in; + } + + @Override + public boolean needsScores() { + final long start = System.nanoTime(); + try { + return super.needsScores(); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final long start = System.nanoTime(); + final LeafCollector inLeafCollector; + try { + inLeafCollector = super.getLeafCollector(context); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + return new FilterLeafCollector(inLeafCollector) { + + @Override + public void collect(int doc) throws IOException { + final long start = System.nanoTime(); + try { + super.collect(doc); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + + @Override + public void setScorer(Scorer scorer) throws IOException { + final long start = System.nanoTime(); + try { + super.setScorer(scorer); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + }; + } + + /** Return the total time spent on this collector. */ + public long getTime() { + return time; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java new file mode 100644 index 0000000000..4c8752fdaf --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java @@ -0,0 +1,165 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * This class is the internal representation of a profiled Query, corresponding + * to a single node in the query tree. 
It is built after the query has finished executing + * and is merely a structured representation, rather than the entity that collects the timing + * profile (see InternalProfiler for that) + * + * Each InternalProfileResult has a List of InternalProfileResults, which will contain + * "children" queries if applicable + */ +final class ProfileResult implements Writeable<ProfileResult>, ToXContent { + + private static final ParseField QUERY_TYPE = new ParseField("query_type"); + private static final ParseField LUCENE_DESCRIPTION = new ParseField("lucene"); + private static final ParseField NODE_TIME = new ParseField("time"); + private static final ParseField CHILDREN = new ParseField("children"); + private static final ParseField BREAKDOWN = new ParseField("breakdown"); + + private final String queryType; + private final String luceneDescription; + private final Map<String, Long> timings; + private final long nodeTime; + private final List<ProfileResult> children; + + public ProfileResult(String queryType, String luceneDescription, Map<String, Long> timings, List<ProfileResult> children, long nodeTime) { + this.queryType = queryType; + this.luceneDescription = luceneDescription; + this.timings = timings; + this.children = children; + this.nodeTime = nodeTime; + } + + public ProfileResult(StreamInput in) throws IOException{ + this.queryType = in.readString(); + this.luceneDescription = in.readString(); + this.nodeTime = in.readLong(); + + int timingsSize = in.readVInt(); + this.timings = new HashMap<>(timingsSize); + for (int i = 0; i < timingsSize; ++i) { + timings.put(in.readString(), in.readLong()); + } + + int size = in.readVInt(); + this.children = new ArrayList<>(size); + + for (int i = 0; i < size; i++) { + children.add(new ProfileResult(in)); + } + } + + /** + * Retrieve the lucene description of this query (e.g. the "explain" text) + */ + public String getLuceneDescription() { + return luceneDescription; + } + + /** + * Retrieve the name of the query (e.g. 
"TermQuery") + */ + public String getQueryName() { + return queryType; + } + + /** + * Returns the timing breakdown for this particular query node + */ + public Map<String, Long> getTimeBreakdown() { + return Collections.unmodifiableMap(timings); + } + + /** + * Returns the total time (inclusive of children) for this query node. + * + * @return elapsed time in nanoseconds + */ + public long getTime() { + return nodeTime; + } + + /** + * Returns a list of all profiled children queries + */ + public List<ProfileResult> getProfiledChildren() { + return Collections.unmodifiableList(children); + } + + @Override + public ProfileResult readFrom(StreamInput in) throws IOException { + return new ProfileResult(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(queryType); + out.writeString(luceneDescription); + out.writeLong(nodeTime); // not Vlong because can be negative + out.writeVInt(timings.size()); + for (Map.Entry<String, Long> entry : timings.entrySet()) { + out.writeString(entry.getKey()); + out.writeLong(entry.getValue()); + } + out.writeVInt(children.size()); + for (ProfileResult child : children) { + child.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder = builder.startObject() + .field(QUERY_TYPE.getPreferredName(), queryType) + .field(LUCENE_DESCRIPTION.getPreferredName(), luceneDescription) + .field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double)(getTime() / 1000000.0))) + .field(BREAKDOWN.getPreferredName(), timings); + + if (!children.isEmpty()) { + builder = builder.startArray(CHILDREN.getPreferredName()); + for (ProfileResult child : children) { + builder = child.toXContent(builder, params); + } + builder = builder.endArray(); + } + + builder = builder.endObject(); + return builder; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java 
b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java new file mode 100644 index 0000000000..b0dc6f2cd4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Collection; + +/** + * {@link Scorer} wrapper that will compute how much time is spent on moving + * the iterator, confirming matches and computing scores. 
+ */ +final class ProfileScorer extends Scorer { + + private final Scorer scorer; + private ProfileWeight profileWeight; + private final ProfileBreakdown profile; + + ProfileScorer(ProfileWeight w, Scorer scorer, ProfileBreakdown profile) throws IOException { + super(w); + this.scorer = scorer; + this.profileWeight = w; + this.profile = profile; + } + + @Override + public int docID() { + return scorer.docID(); + } + + @Override + public int advance(int target) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.ADVANCE); + try { + return scorer.advance(target); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int nextDoc() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC); + try { + return scorer.nextDoc(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public float score() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.SCORE); + try { + return scorer.score(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int freq() throws IOException { + return scorer.freq(); + } + + @Override + public long cost() { + return scorer.cost(); + } + + @Override + public Weight getWeight() { + return profileWeight; + } + + @Override + public Collection<ChildScorer> getChildren() { + return scorer.getChildren(); + } + + @Override + public TwoPhaseIterator asTwoPhaseIterator() { + final TwoPhaseIterator in = scorer.asTwoPhaseIterator(); + if (in == null) { + return null; + } + final DocIdSetIterator inApproximation = in.approximation(); + final DocIdSetIterator approximation = new DocIdSetIterator() { + + @Override + public int advance(int target) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.ADVANCE); + try { + return inApproximation.advance(target); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int nextDoc() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC); + 
try { + return inApproximation.nextDoc(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int docID() { + return inApproximation.docID(); + } + + @Override + public long cost() { + return inApproximation.cost(); + } + }; + return new TwoPhaseIterator(approximation) { + @Override + public boolean matches() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.MATCH); + try { + return in.matches(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public float matchCost() { + return in.matchCost(); + } + }; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java new file mode 100644 index 0000000000..6e005babb3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.*; + +/** + * A container class to hold the profile results for a single shard in the request. + * Contains a list of query profiles, a collector tree and a total rewrite tree. + */ +public final class ProfileShardResult implements Writeable<ProfileShardResult>, ToXContent { + + private final List<ProfileResult> profileResults; + + private final CollectorResult profileCollector; + + private final long rewriteTime; + + public ProfileShardResult(List<ProfileResult> profileResults, long rewriteTime, + CollectorResult profileCollector) { + assert(profileCollector != null); + this.profileResults = profileResults; + this.profileCollector = profileCollector; + this.rewriteTime = rewriteTime; + } + + public ProfileShardResult(StreamInput in) throws IOException { + int profileSize = in.readVInt(); + profileResults = new ArrayList<>(profileSize); + for (int j = 0; j < profileSize; j++) { + profileResults.add(new ProfileResult(in)); + } + + profileCollector = new CollectorResult(in); + rewriteTime = in.readLong(); + } + + public List<ProfileResult> getQueryResults() { + return Collections.unmodifiableList(profileResults); + } + + public long getRewriteTime() { + return rewriteTime; + } + + public CollectorResult getCollectorResult() { + return profileCollector; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("query"); + for (ProfileResult p : profileResults) { + p.toXContent(builder, params); + } + builder.endArray(); + builder.field("rewrite_time", rewriteTime); + builder.startArray("collector"); + 
profileCollector.toXContent(builder, params); + builder.endArray(); + return builder; + } + + @Override + public ProfileShardResult readFrom(StreamInput in) throws IOException { + return new ProfileShardResult(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(profileResults.size()); + for (ProfileResult p : profileResults) { + p.writeTo(out); + } + profileCollector.writeTo(out); + out.writeLong(rewriteTime); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java new file mode 100644 index 0000000000..1ce5cd721f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Set; + +/** + * Weight wrapper that will compute how much time it takes to build the + * {@link Scorer} and then return a {@link Scorer} that is wrapped in + * order to compute timings as well. + */ +public final class ProfileWeight extends Weight { + + private final Weight subQueryWeight; + private final ProfileBreakdown profile; + + public ProfileWeight(Query query, Weight subQueryWeight, ProfileBreakdown profile) throws IOException { + super(query); + this.subQueryWeight = subQueryWeight; + this.profile = profile; + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER); + final Scorer subQueryScorer; + try { + subQueryScorer = subQueryWeight.scorer(context); + } finally { + profile.stopAndRecordTime(); + } + if (subQueryScorer == null) { + return null; + } + + return new ProfileScorer(this, subQueryScorer, profile); + } + + @Override + public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { + // We use the default bulk scorer instead of the specialized one. The reason + // is that Lucene's BulkScorers do everything at once: finding matches, + // scoring them and calling the collector, so they make it impossible to + // see where time is spent, which is the purpose of query profiling. 
+ // The default bulk scorer will pull a scorer and iterate over matches, + // this might be a significantly different execution path for some queries + // like disjunctions, but in general this is what is done anyway + return super.bulkScorer(context); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + return subQueryWeight.explain(context, doc); + } + + @Override + public float getValueForNormalization() throws IOException { + return subQueryWeight.getValueForNormalization(); + } + + @Override + public void normalize(float norm, float topLevelBoost) { + subQueryWeight.normalize(norm, topLevelBoost); + } + + @Override + public void extractTerms(Set<Term> set) { + subQueryWeight.extractTerms(set); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profiler.java b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java new file mode 100644 index 0000000000..bf0c9ec01b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.Query; + +import java.util.*; + +/** + * This class acts as a thread-local storage for profiling a query. It also + * builds a representation of the query tree which is built constructed + * "online" as the weights are wrapped by ContextIndexSearcher. This allows us + * to know the relationship between nodes in tree without explicitly + * walking the tree or pre-wrapping everything + * + * A Profiler is associated with every Search, not per Search-Request. E.g. a + * request may execute two searches (query + global agg). A Profiler just + * represents one of those + */ +public final class Profiler { + + private final InternalProfileTree queryTree = new InternalProfileTree(); + + /** + * The root Collector used in the search + */ + private InternalProfileCollector collector; + + public Profiler() {} + + /** Set the collector that is associated with this profiler. */ + public void setCollector(InternalProfileCollector collector) { + if (this.collector != null) { + throw new IllegalStateException("The collector can only be set once."); + } + this.collector = Objects.requireNonNull(collector); + } + + /** + * Get the {@link ProfileBreakdown} for the given query, potentially creating it if it did not exist. + * This should only be used for queries that will be undergoing scoring. Do not use it to profile the + * rewriting phase + */ + public ProfileBreakdown getQueryBreakdown(Query query) { + return queryTree.getQueryBreakdown(query); + } + + /** + * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a + * single metric + */ + public void startRewriteTime() { + queryTree.startRewriteTime(); + } + + /** + * Stop recording the current rewrite and add it's time to the total tally, returning the + * cumulative time so far. 
+ * + * @return cumulative rewrite time + */ + public long stopAndAddRewriteTime() { + return queryTree.stopAndAddRewriteTime(); + } + + /** + * Removes the last (e.g. most recent) query on the stack. This should only be called for scoring + * queries, not rewritten queries + */ + public void pollLastQuery() { + queryTree.pollLast(); + } + + /** + * @return a hierarchical representation of the profiled query tree + */ + public List<ProfileResult> getQueryTree() { + return queryTree.getQueryTree(); + } + + /** + * @return total time taken to rewrite all queries in this profile + */ + public long getRewriteTime() { + return queryTree.getRewriteTime(); + } + + /** + * Return the current root Collector for this search + */ + public CollectorResult getCollector() { + return collector.getCollectorTree(); + } + + /** + * Helper method to convert Profiler into InternalProfileShardResults, which can be + * serialized to other nodes, emitted as JSON, etc. + * + * @param profilers A list of Profilers to convert into InternalProfileShardResults + * @return A list of corresponding InternalProfileShardResults + */ + public static List<ProfileShardResult> buildShardResults(List<Profiler> profilers) { + List<ProfileShardResult> results = new ArrayList<>(profilers.size()); + for (Profiler profiler : profilers) { + ProfileShardResult result = new ProfileShardResult( + profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector()); + results.add(result); + } + return results; + } + + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java new file mode 100644 index 0000000000..0fb7d9ac1c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.search.internal.ContextIndexSearcher; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** Wrapper around several {@link Profiler}s that makes management easier. */ +public final class Profilers { + + private final ContextIndexSearcher searcher; + private final List<Profiler> profilers; + + /** Sole constructor. This {@link Profilers} instance will initiall wrap one {@link Profiler}. */ + public Profilers(ContextIndexSearcher searcher) { + this.searcher = searcher; + this.profilers = new ArrayList<>(); + addProfiler(); + } + + /** Switch to a new profile. */ + public Profiler addProfiler() { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + profilers.add(profiler); + return profiler; + } + + /** Get the current profiler. */ + public Profiler getCurrent() { + return profilers.get(profilers.size() - 1); + } + + /** Return the list of all created {@link Profiler}s so far. 
*/ + public List<Profiler> getProfilers() { + return Collections.unmodifiableList(profilers); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index ce8836cd33..08ff849871 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -52,13 +52,16 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationPhase; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.*; import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.sort.SortParseElement; import org.elasticsearch.search.sort.TrackScoresParseElement; import org.elasticsearch.search.suggest.SuggestPhase; +import java.util.AbstractList; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -124,6 +127,11 @@ public class QueryPhase implements SearchPhase { } suggestPhase.execute(searchContext); aggregationPhase.execute(searchContext); + + if (searchContext.getProfilers() != null) { + List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers()); + searchContext.queryResult().profileResults(shardResults); + } } private static boolean returnsDocsInOrder(Query query, Sort sort) { @@ -147,6 +155,7 @@ public class QueryPhase implements SearchPhase { QuerySearchResult queryResult = searchContext.queryResult(); queryResult.searchTimedOut(false); + final boolean doProfile = searchContext.getProfilers() != null; final SearchType searchType = searchContext.searchType(); boolean rescore = false; try { @@ -162,9 +171,13 @@ public class QueryPhase implements SearchPhase 
{ Callable<TopDocs> topDocsCallable; assert query == searcher.rewrite(query); // already rewritten + if (searchContext.size() == 0) { // no matter what the value of from is final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); collector = totalHitCountCollector; + if (searchContext.getProfilers() != null) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList()); + } topDocsCallable = new Callable<TopDocs>() { @Override public TopDocs call() throws Exception { @@ -219,6 +232,9 @@ public class QueryPhase implements SearchPhase { topDocsCollector = TopScoreDocCollector.create(numDocs, lastEmittedDoc); } collector = topDocsCollector; + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList()); + } topDocsCallable = new Callable<TopDocs>() { @Override public TopDocs call() throws Exception { @@ -254,27 +270,57 @@ public class QueryPhase implements SearchPhase { final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER; if (terminateAfterSet) { + final Collector child = collector; // throws Lucene.EarlyTerminationException when given count is reached collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT, + Collections.singletonList((InternalProfileCollector) child)); + } } if (searchContext.parsedPostFilter() != null) { + final Collector child = collector; // this will only get applied to the actual search collector and not // to any scoped collectors, also, it will only be applied to the main collector // since that is where the filter should only work final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false); collector = new 
FilteredCollector(collector, filterWeight); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER, + Collections.singletonList((InternalProfileCollector) child)); + } } // plug in additional collectors, like aggregations - List<Collector> allCollectors = new ArrayList<>(); - allCollectors.add(collector); - allCollectors.addAll(searchContext.queryCollectors().values()); - collector = MultiCollector.wrap(allCollectors); + final List<Collector> subCollectors = new ArrayList<>(); + subCollectors.add(collector); + subCollectors.addAll(searchContext.queryCollectors().values()); + collector = MultiCollector.wrap(subCollectors); + if (doProfile && collector instanceof InternalProfileCollector == false) { + // When there is a single collector to wrap, MultiCollector returns it + // directly, so only wrap in the case that there are several sub collectors + final List<InternalProfileCollector> children = new AbstractList<InternalProfileCollector>() { + @Override + public InternalProfileCollector get(int index) { + return (InternalProfileCollector) subCollectors.get(index); + } + @Override + public int size() { + return subCollectors.size(); + } + }; + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children); + } // apply the minimum score after multi collector so we filter aggs as well if (searchContext.minimumScore() != null) { + final Collector child = collector; collector = new MinimumScoreCollector(collector, searchContext.minimumScore()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE, + Collections.singletonList((InternalProfileCollector) child)); + } } if (collector.getClass() == TotalHitCountCollector.class) { @@ -319,13 +365,21 @@ public class QueryPhase implements SearchPhase { final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis(); if (timeoutSet && collector 
!= null) { // collector might be null if no collection is actually needed + final Collector child = collector; // TODO: change to use our own counter that uses the scheduler in ThreadPool // throws TimeLimitingCollector.TimeExceededException when timeout has reached collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT, + Collections.singletonList((InternalProfileCollector) child)); + } } try { if (collector != null) { + if (doProfile) { + searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector); + } searcher.search(query, collector); } } catch (TimeLimitingCollector.TimeExceededException e) { @@ -343,7 +397,13 @@ public class QueryPhase implements SearchPhase { queryResult.topDocs(topDocsCallable.call()); + if (searchContext.getProfilers() != null) { + List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers()); + searchContext.queryResult().profileResults(shardResults); + } + return rescore; + } catch (Throwable e) { throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e); } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 7f8d12a9c9..9223eb5a82 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.query; import org.apache.lucene.search.TopDocs; +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ 
-29,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; @@ -53,6 +56,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; + private List<ProfileShardResult> profileShardResults; public QuerySearchResult() { @@ -120,6 +124,22 @@ public class QuerySearchResult extends QuerySearchResultProvider { this.aggregations = aggregations; } + /** + * Returns the profiled results for this search, or potentially null if result was empty + * @return The profiled results, or null + */ + public @Nullable List<ProfileShardResult> profileResults() { + return profileShardResults; + } + + /** + * Sets the finalized profiling results for this query + * @param shardResults The finalized profile + */ + public void profileResults(List<ProfileShardResult> shardResults) { + this.profileShardResults = shardResults; + } + public List<SiblingPipelineAggregator> pipelineAggregators() { return pipelineAggregators; } @@ -191,6 +211,15 @@ public class QuerySearchResult extends QuerySearchResultProvider { } searchTimedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { + int profileSize = in.readVInt(); + profileShardResults = new ArrayList<>(profileSize); + for (int i = 0; i < profileSize; i++) { + ProfileShardResult result = new ProfileShardResult(in); + profileShardResults.add(result); + } + } } @Override @@ -229,5 +258,17 @@ public class QuerySearchResult extends QuerySearchResultProvider { } 
out.writeBoolean(searchTimedOut); out.writeOptionalBoolean(terminatedEarly); + + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + if (profileShardResults == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(profileShardResults.size()); + for (ProfileShardResult shardResult : profileShardResults) { + shardResult.writeTo(out); + } + } + } } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index c465eaf6ef..e4fe2c08f7 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -130,7 +131,7 @@ public class ScriptSortParser implements SortParser { if (type == null) { throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index 106672ae7a..6a0155ffb7 100644 --- 
a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -30,7 +30,7 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs; import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector; import org.apache.lucene.util.*; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.MappedFieldType; @@ -57,7 +57,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext> } CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize()); spare.copyUTF8Bytes(suggestionContext.getText()); - CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length()); + CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new Text(spare.toString()), 0, spare.length()); completionSuggestion.addTerm(completionSuggestEntry); TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize()); suggest(searcher, suggestionContext.toQuery(), collector); @@ -91,7 +91,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext> } if (numResult++ < suggestionContext.getSize()) { CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( - new StringText(suggestDoc.key.toString()), suggestDoc.score, contexts, payload); + new Text(suggestDoc.key.toString()), suggestDoc.score, contexts, payload); completionSuggestEntry.addOption(option); } else { break; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java 
b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 4bbdaf9c49..9b083a9178 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; import java.io.IOException; +import java.util.Collections; public final class PhraseSuggestParser implements SuggestContextParser { @@ -143,7 +144,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { } Template template = Template.parse(parser, parseFieldMatcher); CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH, - headersContext); + headersContext, Collections.emptyMap()); suggestion.setCollateQueryScript(compiledScript); } else if ("params".equals(fieldName)) { suggestion.setCollateScriptParams(parser.map()); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index fccf9ebc30..c7fa6fae30 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -30,7 +30,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.ParsedQuery; @@ -127,11 +126,11 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> { if (!collateMatch && 
!collatePrune) { continue; } - Text phrase = new StringText(spare.toString()); + Text phrase = new Text(spare.toString()); Text highlighted = null; if (suggestion.getPreTag() != null) { spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag())); - highlighted = new StringText(spare.toString()); + highlighted = new Text(spare.toString()); } if (collatePrune) { resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch)); @@ -147,7 +146,7 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> { private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) { spare.copyUTF8Bytes(suggestion.getText()); - return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length(), cutoffScore); + return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore); } ScriptService scriptService() { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java index 4c1b176c99..34cd3ad4d5 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java @@ -27,8 +27,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.text.BytesText; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.search.suggest.SuggestContextParser; import org.elasticsearch.search.suggest.SuggestUtils; @@ -54,10 +52,10 @@ public final class TermSuggester extends Suggester<TermSuggestionContext> { SuggestWord[] suggestedWords = 
directSpellChecker.suggestSimilar( token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode() ); - Text key = new BytesText(new BytesArray(token.term.bytes())); + Text key = new Text(new BytesArray(token.term.bytes())); TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset); for (SuggestWord suggestWord : suggestedWords) { - Text word = new StringText(suggestWord.string); + Text word = new Text(suggestWord.string); resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score)); } response.addTerm(resultEntry); diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index cd710d52cd..14b2680d25 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -33,8 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; -import org.elasticsearch.cluster.settings.DynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -118,18 +117,19 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private final MetaDataCreateIndexService createIndexService; - private final DynamicSettings dynamicSettings; + private final ClusterSettings dynamicSettings; private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; private final CopyOnWriteArrayList<ActionListener<RestoreCompletionResponse>> 
listeners = new CopyOnWriteArrayList<>(); private final BlockingQueue<UpdateIndexShardRestoreStatusRequest> updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue(); + private final ClusterSettings clusterSettings; @Inject public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService, - AllocationService allocationService, MetaDataCreateIndexService createIndexService, @ClusterDynamicSettings DynamicSettings dynamicSettings, - MetaDataIndexUpgradeService metaDataIndexUpgradeService) { + AllocationService allocationService, MetaDataCreateIndexService createIndexService, ClusterSettings dynamicSettings, + MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { super(settings); this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -140,6 +140,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); + this.clusterSettings = clusterSettings; } /** @@ -389,24 +390,9 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { if (request.includeGlobalState()) { if (metaData.persistentSettings() != null) { - boolean changed = false; - Settings.Builder persistentSettings = Settings.settingsBuilder().put(); - for (Map.Entry<String, String> entry : metaData.persistentSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - 
persistentSettings.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error); - } - } else { - logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey()); - } - } - if (changed) { - mdBuilder.persistentSettings(persistentSettings.build()); - } + Settings settings = metaData.persistentSettings(); + clusterSettings.dryRun(settings); + mdBuilder.persistentSettings(settings); } if (metaData.templates() != null) { // TODO: Should all existing templates be deleted first? diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index b0d81279b0..56e02926ed 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -20,13 +20,13 @@ package org.elasticsearch.threadpool; import org.apache.lucene.util.Counter; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.SizeValue; @@ -38,14 +38,11 @@ import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.node.settings.NodeSettingsService; import 
java.io.IOException; import java.util.*; import java.util.concurrent.*; -import java.util.function.Function; -import java.util.regex.Matcher; -import java.util.regex.Pattern; +import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -172,7 +169,7 @@ public class ThreadPool extends AbstractComponent { } } - public static final String THREADPOOL_GROUP = "threadpool."; + public static final Setting<Settings> THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.CLUSTER); private volatile Map<String, ExecutorHolder> executors; @@ -184,7 +181,7 @@ public class ThreadPool extends AbstractComponent { private final EstimatedTimeThread estimatedTimeThread; - private boolean settingsListenerIsSet = false; + private final AtomicBoolean settingsListenerIsSet = new AtomicBoolean(false); static final Executor DIRECT_EXECUTOR = command -> command.run(); @@ -197,7 +194,8 @@ public class ThreadPool extends AbstractComponent { assert settings.get("name") != null : "ThreadPool's settings should contain a name"; - Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings); + Map<String, Settings> groupSettings = THREADPOOL_GROUP_SETTING.get(settings).getAsGroups(); + validate(groupSettings); int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5); @@ -252,18 +250,12 @@ public class ThreadPool extends AbstractComponent { this.estimatedTimeThread.start(); } - private Map<String, Settings> getThreadPoolSettingsGroup(Settings settings) { - Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP); - validate(groupSettings); - return groupSettings; - } - - public void setNodeSettingsService(NodeSettingsService nodeSettingsService) { - if(settingsListenerIsSet) { + public void setClusterSettings(ClusterSettings 
clusterSettings) { + if(settingsListenerIsSet.compareAndSet(false, true)) { + clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups())); + } else { throw new IllegalStateException("the node settings listener was set more then once"); } - nodeSettingsService.addListener(new ApplySettings()); - settingsListenerIsSet = true; } public long estimatedTimeInMillis() { @@ -526,8 +518,8 @@ public class ThreadPool extends AbstractComponent { throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]"); } - public void updateSettings(Settings settings) { - Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings); + private void updateSettings(Settings settings) { + Map<String, Settings> groupSettings = settings.getAsGroups(); if (groupSettings.isEmpty()) { return; } @@ -583,7 +575,7 @@ public class ThreadPool extends AbstractComponent { ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key); // TODO: the type equality check can be removed after #3760/#6732 are addressed if (type != null && !correctThreadPoolType.getType().equals(type)) { - throw new IllegalArgumentException("setting " + THREADPOOL_GROUP + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); + throw new IllegalArgumentException("setting " + THREADPOOL_GROUP_SETTING.getKey() + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); } } } @@ -866,13 +858,6 @@ public class ThreadPool extends AbstractComponent { } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - updateSettings(settings); - } - } - /** * Returns <code>true</code> if the given service was terminated successfully. If the termination timed out, * the service is <code>null</code> this method will return <code>false</code>. 
@@ -911,38 +896,4 @@ public class ThreadPool extends AbstractComponent { } return false; } - - public static ThreadPoolTypeSettingsValidator THREAD_POOL_TYPE_SETTINGS_VALIDATOR = new ThreadPoolTypeSettingsValidator(); - private static class ThreadPoolTypeSettingsValidator implements Validator { - @Override - public String validate(String setting, String value, ClusterState clusterState) { - // TODO: the type equality validation can be removed after #3760/#6732 are addressed - Matcher matcher = Pattern.compile("threadpool\\.(.*)\\.type").matcher(setting); - if (!matcher.matches()) { - return null; - } else { - String threadPool = matcher.group(1); - ThreadPool.ThreadPoolType defaultThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPool); - ThreadPool.ThreadPoolType threadPoolType; - try { - threadPoolType = ThreadPool.ThreadPoolType.fromType(value); - } catch (IllegalArgumentException e) { - return e.getMessage(); - } - if (defaultThreadPoolType.equals(threadPoolType)) { - return null; - } else { - return String.format( - Locale.ROOT, - "thread pool type for [%s] can only be updated to [%s] but was [%s]", - threadPool, - defaultThreadPoolType.getType(), - threadPoolType.getType() - ); - } - } - - } - } - } diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 10fa9b239d..78b07e3aae 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -21,6 +21,8 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -34,9 +36,8 @@ import java.util.Map; public interface 
Transport extends LifecycleComponent<Transport> { - public static class TransportSettings { - public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress"; - } + Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER); + Setting<Boolean> TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER); void transportServiceAdapter(TransportServiceAdapter service); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java new file mode 100644 index 0000000000..8c042cd193 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; + +/** + * Base class for delegating transport response to a transport channel + */ +public abstract class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> { + + /** + * Convenience method for delegating an empty response to the provided changed + */ + public static TransportChannelResponseHandler<TransportResponse.Empty> emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { + return new TransportChannelResponseHandler<TransportResponse.Empty>(logger, channel, extraInfoOnError) { + @Override + public TransportResponse.Empty newInstance() { + return TransportResponse.Empty.INSTANCE; + } + }; + } + + private final ESLogger logger; + private final TransportChannel channel; + private final String extraInfoOnError; + + protected TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { + this.logger = logger; + this.channel = channel; + this.extraInfoOnError = extraInfoOnError; + } + + @Override + public void handleResponse(T response) { + try { + channel.sendResponse(response); + } catch (IOException e) { + handleException(new TransportException(e)); + } + } + + @Override + public void handleException(TransportException exp) { + try { + channel.sendResponse(exp); + } catch (IOException e) { + logger.debug("failed to send failure {}", e, extraInfoOnError == null ? 
"" : "(" + extraInfoOnError + ")"); + } + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } +} diff --git a/core/src/main/java/org/elasticsearch/transport/TransportModule.java b/core/src/main/java/org/elasticsearch/transport/TransportModule.java deleted file mode 100644 index abf90deee8..0000000000 --- a/core/src/main/java/org/elasticsearch/transport/TransportModule.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.transport.local.LocalTransport; -import org.elasticsearch.transport.netty.NettyTransport; - -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -/** - * - */ -public class TransportModule extends AbstractModule { - - public static final String TRANSPORT_TYPE_KEY = "transport.type"; - public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; - - public static final String LOCAL_TRANSPORT = "local"; - public static final String NETTY_TRANSPORT = "netty"; - - private final ESLogger logger; - private final Settings settings; - - private final Map<String, Class<? extends TransportService>> transportServices = new HashMap<>(); - private final Map<String, Class<? extends Transport>> transports = new HashMap<>(); - private Class<? extends TransportService> configuredTransportService; - private Class<? extends Transport> configuredTransport; - private String configuredTransportServiceSource; - private String configuredTransportSource; - - public TransportModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - addTransport(LOCAL_TRANSPORT, LocalTransport.class); - addTransport(NETTY_TRANSPORT, NettyTransport.class); - } - - public void addTransportService(String name, Class<? extends TransportService> clazz) { - Class<? 
extends TransportService> oldClazz = transportServices.put(name, clazz); - if (oldClazz != null) { - throw new IllegalArgumentException("Cannot register TransportService [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName()); - } - } - - public void addTransport(String name, Class<? extends Transport> clazz) { - Class<? extends Transport> oldClazz = transports.put(name, clazz); - if (oldClazz != null) { - throw new IllegalArgumentException("Cannot register Transport [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName()); - } - } - - @Override - protected void configure() { - if (configuredTransportService != null) { - logger.info("Using [{}] as transport service, overridden by [{}]", configuredTransportService.getName(), configuredTransportServiceSource); - bind(TransportService.class).to(configuredTransportService).asEagerSingleton(); - } else { - String typeName = settings.get(TRANSPORT_SERVICE_TYPE_KEY); - if (typeName == null) { - bind(TransportService.class).asEagerSingleton(); - } else { - if (transportServices.containsKey(typeName) == false) { - throw new IllegalArgumentException("Unknown TransportService type [" + typeName + "], known types are: " + transportServices.keySet()); - } - bind(TransportService.class).to(transportServices.get(typeName)).asEagerSingleton(); - } - } - - bind(NamedWriteableRegistry.class).asEagerSingleton(); - if (configuredTransport != null) { - logger.info("Using [{}] as transport, overridden by [{}]", configuredTransport.getName(), configuredTransportSource); - bind(Transport.class).to(configuredTransport).asEagerSingleton(); - } else { - String defaultType = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT; - String typeName = settings.get(TRANSPORT_TYPE_KEY, defaultType); - Class<? 
extends Transport> clazz = transports.get(typeName); - if (clazz == null) { - throw new IllegalArgumentException("Unknown Transport [" + typeName + "]"); - } - bind(Transport.class).to(clazz).asEagerSingleton(); - } - } - - public void setTransportService(Class<? extends TransportService> transportService, String source) { - Objects.requireNonNull(transportService, "Configured transport service may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport service may not be null"); - this.configuredTransportService = transportService; - this.configuredTransportServiceSource = source; - } - - public void setTransport(Class<? extends Transport> transport, String source) { - Objects.requireNonNull(transport, "Configured transport may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport may not be null"); - this.configuredTransport = transport; - this.configuredTransportSource = source; - } -}
\ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 14fc9029b0..444f52b9c0 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -37,20 +39,15 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; @@ -88,14 +85,14 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic // tracer log - public static final String 
SETTING_TRACE_LOG_INCLUDE = "transport.tracer.include"; - public static final String SETTING_TRACE_LOG_EXCLUDE = "transport.tracer.exclude"; + public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER); + public static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER); + private final ESLogger tracerLog; volatile String[] tracerLogInclude; volatile String[] tracelLogExclude; - private final ApplySettings settingsListener = new ApplySettings(); /** if set will call requests sent to this id to shortcut and executed locally */ volatile DiscoveryNode localNode = null; @@ -109,8 +106,8 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic super(settings); this.transport = transport; this.threadPool = threadPool; - this.tracerLogInclude = settings.getAsArray(SETTING_TRACE_LOG_INCLUDE, Strings.EMPTY_ARRAY, true); - this.tracelLogExclude = settings.getAsArray(SETTING_TRACE_LOG_EXCLUDE, new String[]{"internal:discovery/zen/fd*", TransportLivenessAction.NAME}, true); + setTracerLogInclude(TRACE_LOG_INCLUDE_SETTING.get(settings)); + setTracerLogExclude(TRACE_LOG_EXCLUDE_SETTING.get(settings)); tracerLog = Loggers.getLogger(logger, ".tracer"); adapter = createAdapter(); } @@ -134,34 +131,18 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic // These need to be optional as they don't exist in the context of a transport client @Inject(optional = true) - public void setDynamicSettings(NodeSettingsService nodeSettingsService) { - nodeSettingsService.addListener(settingsListener); + public void setDynamicSettings(ClusterSettings clusterSettings) { + 
clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_INCLUDE_SETTING, this::setTracerLogInclude); + clusterSettings.addSettingsUpdateConsumer(TRACE_LOG_EXCLUDE_SETTING, this::setTracerLogExclude); } - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String[] newTracerLogInclude = settings.getAsArray(SETTING_TRACE_LOG_INCLUDE, TransportService.this.tracerLogInclude, true); - String[] newTracerLogExclude = settings.getAsArray(SETTING_TRACE_LOG_EXCLUDE, TransportService.this.tracelLogExclude, true); - if (newTracerLogInclude == TransportService.this.tracerLogInclude && newTracerLogExclude == TransportService.this.tracelLogExclude) { - return; - } - if (Arrays.equals(newTracerLogInclude, TransportService.this.tracerLogInclude) && - Arrays.equals(newTracerLogExclude, TransportService.this.tracelLogExclude)) { - return; - } - TransportService.this.tracerLogInclude = newTracerLogInclude; - TransportService.this.tracelLogExclude = newTracerLogExclude; - logger.info("tracer log updated to use include: {}, exclude: {}", newTracerLogInclude, newTracerLogExclude); - } + void setTracerLogInclude(List<String> tracerLogInclude) { + this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); } - // used for testing - public void applySettings(Settings settings) { - settingsListener.onRefreshSettings(settings); + void setTracerLogExclude(List<String> tracelLogExclude) { + this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY); } - @Override protected void doStart() { adapter.rxMetric.clear(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index ab39a35d22..6a6a6c3801 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -87,6 +87,7 @@ import 
org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory; import org.jboss.netty.util.HashedWheelTimer; import java.io.IOException; +import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -224,7 +225,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT))); this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null); this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1); - this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false); + this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery", settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2)); this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk", settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3)); @@ -294,7 +295,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem this.serverOpenChannels = openChannels; // extract default profile first and create standard bootstrap - Map<String, Settings> profiles = settings.getGroups("transport.profiles", true); + Map<String, Settings> profiles = TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true); if (!profiles.containsKey(DEFAULT_PROFILE)) { profiles = new HashMap<>(profiles); profiles.put(DEFAULT_PROFILE, Settings.EMPTY); @@ -763,6 +764,11 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem // close the channel as safe measure, which will cause a node to be disconnected if relevant 
ctx.getChannel().close(); disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); + } else if (e.getCause() instanceof BindException) { + logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel()); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + ctx.getChannel().close(); + disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); } else if (e.getCause() instanceof CancelledKeyException) { logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); // close the channel as safe measure, which will cause a node to be disconnected if relevant diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index f577415ee6..78453c9eac 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -26,7 +26,8 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -36,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import 
org.elasticsearch.common.inject.Inject; @@ -205,142 +207,180 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> { } } - class TribeClusterStateListener implements ClusterStateListener { + class TribeClusterStateListener implements ClusterStateListener { private final String tribeName; + private final TribeNodeClusterStateTaskExecutor executor; TribeClusterStateListener(Node tribeNode) { - this.tribeName = tribeNode.settings().get(TRIBE_NAME); + String tribeName = tribeNode.settings().get(TRIBE_NAME); + this.tribeName = tribeName; + executor = new TribeNodeClusterStateTaskExecutor(tribeName); } @Override public void clusterChanged(final ClusterChangedEvent event) { logger.debug("[{}] received cluster event, [{}]", tribeName, event.source()); - clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; - } + clusterService.submitStateUpdateTask( + "cluster event from " + tribeName + ", " + event.source(), + event, + ClusterStateTaskConfig.build(Priority.NORMAL), + executor, + (source, t) -> logger.warn("failed to process [{}]", t, source)); + } + } - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState tribeState = event.state(); - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); - // -- merge nodes - // go over existing nodes, and see if they need to be removed - for (DiscoveryNode discoNode : currentState.nodes()) { - String markedTribeName = discoNode.attributes().get(TRIBE_NAME); - if (markedTribeName != null && markedTribeName.equals(tribeName)) { - if (tribeState.nodes().get(discoNode.id()) == null) { - logger.info("[{}] removing node [{}]", tribeName, discoNode); - nodes.remove(discoNode.id()); - } - } + class TribeNodeClusterStateTaskExecutor implements ClusterStateTaskExecutor<ClusterChangedEvent> { + private final String 
tribeName; + + TribeNodeClusterStateTaskExecutor(String tribeName) { + this.tribeName = tribeName; + } + + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public BatchResult<ClusterChangedEvent> execute(ClusterState currentState, List<ClusterChangedEvent> tasks) throws Exception { + ClusterState accumulator = ClusterState.builder(currentState).build(); + BatchResult.Builder<ClusterChangedEvent> builder = BatchResult.builder(); + + try { + // we only need to apply the latest cluster state update + accumulator = applyUpdate(accumulator, tasks.get(tasks.size() - 1)); + builder.successes(tasks); + } catch (Throwable t) { + builder.failures(tasks, t); + } + + return builder.build(accumulator); + } + + private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) { + boolean clusterStateChanged = false; + ClusterState tribeState = task.state(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); + // -- merge nodes + // go over existing nodes, and see if they need to be removed + for (DiscoveryNode discoNode : currentState.nodes()) { + String markedTribeName = discoNode.attributes().get(TRIBE_NAME); + if (markedTribeName != null && markedTribeName.equals(tribeName)) { + if (tribeState.nodes().get(discoNode.id()) == null) { + clusterStateChanged = true; + logger.info("[{}] removing node [{}]", tribeName, discoNode); + nodes.remove(discoNode.id()); } - // go over tribe nodes, and see if they need to be added - for (DiscoveryNode tribe : tribeState.nodes()) { - if (currentState.nodes().get(tribe.id()) == null) { - // a new node, add it, but also add the tribe name to the attributes - Map<String, String> tribeAttr = new HashMap<>(); - for (ObjectObjectCursor<String, String> attr : tribe.attributes()) { - tribeAttr.put(attr.key, attr.value); - } - tribeAttr.put(TRIBE_NAME, tribeName); - DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), 
tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); - logger.info("[{}] adding node [{}]", tribeName, discoNode); - nodes.put(discoNode); - } + } + } + // go over tribe nodes, and see if they need to be added + for (DiscoveryNode tribe : tribeState.nodes()) { + if (currentState.nodes().get(tribe.id()) == null) { + // a new node, add it, but also add the tribe name to the attributes + Map<String, String> tribeAttr = new HashMap<>(); + for (ObjectObjectCursor<String, String> attr : tribe.attributes()) { + tribeAttr.put(attr.key, attr.value); } + tribeAttr.put(TRIBE_NAME, tribeName); + DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); + clusterStateChanged = true; + logger.info("[{}] adding node [{}]", tribeName, discoNode); + nodes.put(discoNode); + } + } - // -- merge metadata - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - MetaData.Builder metaData = MetaData.builder(currentState.metaData()); - RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); - // go over existing indices, and see if they need to be removed - for (IndexMetaData index : currentState.metaData()) { - String markedTribeName = index.getSettings().get(TRIBE_NAME); - if (markedTribeName != null && markedTribeName.equals(tribeName)) { - IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); - if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { - logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); - removeIndex(blocks, metaData, routingTable, index); - } else { - // always make sure to update the metadata and routing table, in case - // there are changes in them (new mapping, shards moving from initializing to started) - routingTable.add(tribeState.routingTable().index(index.getIndex())); - Settings 
tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); - metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); - } - } + // -- merge metadata + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + MetaData.Builder metaData = MetaData.builder(currentState.metaData()); + RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); + // go over existing indices, and see if they need to be removed + for (IndexMetaData index : currentState.metaData()) { + String markedTribeName = index.getSettings().get(TRIBE_NAME); + if (markedTribeName != null && markedTribeName.equals(tribeName)) { + IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); + clusterStateChanged = true; + if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { + logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); + removeIndex(blocks, metaData, routingTable, index); + } else { + // always make sure to update the metadata and routing table, in case + // there are changes in them (new mapping, shards moving from initializing to started) + routingTable.add(tribeState.routingTable().index(index.getIndex())); + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); + metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); } - // go over tribe one, and see if they need to be added - for (IndexMetaData tribeIndex : tribeState.metaData()) { - // if there is no routing table yet, do nothing with it... 
- IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex()); - if (table == null) { - continue; - } - final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); - if (indexMetaData == null) { - if (!droppedIndices.contains(tribeIndex.getIndex())) { - // a new index, add it, and add the tribe name as a setting - logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex()); + } + } + // go over tribe one, and see if they need to be added + for (IndexMetaData tribeIndex : tribeState.metaData()) { + // if there is no routing table yet, do nothing with it... + IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex()); + if (table == null) { + continue; + } + final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); + if (indexMetaData == null) { + if (!droppedIndices.contains(tribeIndex.getIndex())) { + // a new index, add it, and add the tribe name as a setting + clusterStateChanged = true; + logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex()); + addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); + } + } else { + String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME); + if (!tribeName.equals(existingFromTribe)) { + // we have a potential conflict on index names, decide what to do... + if (ON_CONFLICT_ANY.equals(onConflict)) { + // we chose any tribe, carry on + } else if (ON_CONFLICT_DROP.equals(onConflict)) { + // drop the indices, there is a conflict + clusterStateChanged = true; + logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + removeIndex(blocks, metaData, routingTable, tribeIndex); + droppedIndices.add(tribeIndex.getIndex()); + } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { + // on conflict, prefer a tribe... 
+ String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); + if (tribeName.equals(preferredTribeName)) { + // the new one is hte preferred one, replace... + clusterStateChanged = true; + logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + removeIndex(blocks, metaData, routingTable, tribeIndex); addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); - } - } else { - String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME); - if (!tribeName.equals(existingFromTribe)) { - // we have a potential conflict on index names, decide what to do... - if (ON_CONFLICT_ANY.equals(onConflict)) { - // we chose any tribe, carry on - } else if (ON_CONFLICT_DROP.equals(onConflict)) { - // drop the indices, there is a conflict - logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); - removeIndex(blocks, metaData, routingTable, tribeIndex); - droppedIndices.add(tribeIndex.getIndex()); - } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { - // on conflict, prefer a tribe... - String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); - if (tribeName.equals(preferredTribeName)) { - // the new one is hte preferred one, replace... 
- logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); - removeIndex(blocks, metaData, routingTable, tribeIndex); - addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); - } // else: either the existing one is the preferred one, or we haven't seen one, carry on - } - } + } // else: either the existing one is the preferred one, or we haven't seen one, carry on } } - - return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); } + } - private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { - metaData.remove(index.getIndex()); - routingTable.remove(index.getIndex()); - blocks.removeIndexBlocks(index.getIndex()); - } + if (!clusterStateChanged) { + return currentState; + } else { + return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); + } + } - private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { - Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); - metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); - routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); - if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK); - } - if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK); - } - if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK); - } - 
} + private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { + metaData.remove(index.getIndex()); + routingTable.remove(index.getIndex()); + blocks.removeIndexBlocks(index.getIndex()); + } - @Override - public void onFailure(String source, Throwable t) { - logger.warn("failed to process [{}]", t, source); - } - }); + private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); + metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); + routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); + if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK); + } + if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK); + } + if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK); + } } } } diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help index 2a4e6a6382..8c73e3837a 100644 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help @@ -50,6 +50,7 @@ OFFICIAL PLUGINS - mapper-murmur3 - mapper-size - repository-azure + - repository-hdfs - repository-s3 - store-smb diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 0f29ed5a2f..a287ec119e 100644 --- 
a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -23,19 +23,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.DisjunctionMaxQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryUtils; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.index.*; +import org.apache.lucene.search.*; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.DefaultSimilarity; import org.apache.lucene.search.similarities.Similarity; @@ -44,11 +33,7 @@ import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java new file mode 100644 index 0000000000..bd1377b89f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under 
one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicReference; + +public class SettingsUpdaterTests extends ESTestCase { + + + public void testUpdateSetting() { + AtomicReference<Float> index = new AtomicReference<>(); + AtomicReference<Float> shard = new AtomicReference<>(); + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = 
MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.4).build()); + assertNotSame(clusterState, build); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().transientSettings()), 0.5, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().transientSettings()), 4.5, 0.1); + + clusterState = updater.updateSettings(clusterState, Settings.builder().putNull("cluster.routing.*").build(), + Settings.EMPTY); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); + assertFalse(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + + clusterState = updater.updateSettings(clusterState, + Settings.EMPTY, 
Settings.builder().putNull("cluster.routing.*").put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0).build()); + + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 10.0, 0.1); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().persistentSettings())); + assertFalse(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertNull("updater only does a dryRun", index.get()); + assertNull("updater only does a dryRun", shard.get()); + } + + public void testAllOrNothing() { + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AtomicReference<Float> index = new AtomicReference<>(); + AtomicReference<Float> shard = new AtomicReference<>(); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + + try { + updater.updateSettings(build, 
Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); + fail("all or nothing"); + } catch (IllegalArgumentException ex) { + logger.info("", ex); + assertEquals("Failed to parse value [not a float] for setting [cluster.routing.allocation.balance.index]", ex.getMessage()); + } + assertNull("updater only does a dryRun", index.get()); + assertNull("updater only does a dryRun", shard.get()); + } + + public void testClusterBlock() { + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AtomicReference<Float> index = new AtomicReference<>(); + AtomicReference<Float> shard = new AtomicReference<>(); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + + ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build(), + 
Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); + assertEquals(clusterState.blocks().global().size(), 1); + assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_BLOCK); + + clusterState = updater.updateSettings(build, Settings.EMPTY, + Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertEquals(clusterState.blocks().global().size(), 0); + + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index bb15421821..3ce9e99f4d 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -285,4 +285,11 @@ public class CreateIndexIT extends ESIntegTestCase { assertThat(messages.toString(), containsString("mapper [text] is used by multiple types")); } } + + public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get(); + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).get(); + internalCluster().fullRestart(); + ensureGreen("test"); + } } diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java index 19cce93c6e..dfc6ea67c4 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java @@ -17,16 +17,20 @@ * under the License. 
*/ -package org.elasticsearch.indices.flush; +package org.elasticsearch.action.admin.indices.flush; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.IndicesSyncedFlushResult.ShardCounts; -import org.elasticsearch.indices.flush.SyncedFlushService.SyncedFlushResponse; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -42,14 +46,11 @@ import static org.hamcrest.Matchers.hasSize; public class SyncedFlushUnitTests extends ESTestCase { - private static class TestPlan { - public ShardCounts totalCounts; - public Map<String, ShardCounts> countsPerIndex = new HashMap<>(); + public SyncedFlushResponse.ShardCounts totalCounts; + public Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>(); public ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>(); - - public IndicesSyncedFlushResult result; - + public SyncedFlushResponse result; } public void testIndicesSyncedFlushResult() throws IOException { @@ -76,6 +77,56 @@ public class SyncedFlushUnitTests extends ESTestCase { } } + public void testResponseStreaming() throws IOException { + final TestPlan testPlan = createTestPlan(); + assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); + assertThat(testPlan.result.successfulShards(), 
equalTo(testPlan.totalCounts.successful)); + assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); + assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); + BytesStreamOutput out = new BytesStreamOutput(); + testPlan.result.writeTo(out); + StreamInput in = StreamInput.wrap(out.bytes()); + SyncedFlushResponse readResponse = new SyncedFlushResponse(); + readResponse.readFrom(in); + assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); + assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); + assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); + assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); + assertThat(readResponse.shardsResultPerIndex.size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); + for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : readResponse.getShardsResultPerIndex().entrySet()) { + List<ShardsSyncedFlushResult> originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); + assertNotNull(originalShardsResults); + List<ShardsSyncedFlushResult> readShardsResults = entry.getValue(); + assertThat(readShardsResults.size(), equalTo(originalShardsResults.size())); + for (int i = 0; i < readShardsResults.size(); i++) { + ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i); + ShardsSyncedFlushResult readShardResult = readShardsResults.get(i); + assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason())); + assertThat(originalShardResult.failed(), equalTo(readShardResult.failed())); + assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId())); + assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards())); + assertThat(originalShardResult.syncId(), 
equalTo(readShardResult.syncId())); + assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); + assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); + for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.failedShards().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); + assertNotNull(readShardResponse); + SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); + assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); + assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); + } + assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); + for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.shardResponses().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses().get(shardEntry.getKey()); + assertNotNull(readShardResponse); + SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); + assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); + assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); + } + } + } + } + private void assertShardCount(String name, Map<String, Object> header, ShardCounts expectedCounts) { assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); @@ -105,32 +156,33 @@ public class SyncedFlushUnitTests extends ESTestCase { failures++; shardsResults.add(new ShardsSyncedFlushResult(shardId, 
replicas + 1, "simulated total failure")); } else { - Map<ShardRouting, SyncedFlushResponse> shardResponses = new HashMap<>(); + Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>(); for (int copy = 0; copy < replicas + 1; copy++) { final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null, - copy == 0, ShardRoutingState.STARTED, 0); + copy == 0, ShardRoutingState.STARTED, 0); if (randomInt(5) < 2) { // shard copy failure failed++; failures++; - shardResponses.put(shardRouting, new SyncedFlushResponse("copy failure " + shardId)); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); } else { successful++; - shardResponses.put(shardRouting, new SyncedFlushResponse()); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse()); } } shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); } } indicesResults.put(index, shardsResults); - testPlan.countsPerIndex.put(index, new ShardCounts(shards * (replicas + 1), successful, failed)); + testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); testPlan.expectedFailuresPerIndex.put(index, failures); totalFailed += failed; totalShards += shards * (replicas + 1); totalSuccesful += successful; } - testPlan.result = new IndicesSyncedFlushResult(indicesResults); - testPlan.totalCounts = new ShardCounts(totalShards, totalSuccesful, totalFailed); + testPlan.result = new SyncedFlushResponse(indicesResults); + testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); return testPlan; } + } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java 
index ffb9e630b7..ebd32ccb48 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -87,6 +87,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { for (ObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : shardStores.values()) { for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) { assertThat(storeStatus.getVersion(), greaterThan(-1l)); + assertThat(storeStatus.getAllocationId(), notNullValue()); assertThat(storeStatus.getNode(), notNullValue()); assertThat(storeStatus.getStoreException(), nullValue()); } @@ -108,7 +109,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size())); for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> storesStatus : shardStoresStatuses) { assertThat("must report for one store", storesStatus.value.size(), equalTo(1)); - assertThat("reported store should be primary", storesStatus.value.get(0).getAllocation(), equalTo(IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY)); + assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY)); } logger.info("--> enable allocation"); enableAllocation(index); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index c862583a3f..6d1159c82a 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -22,24 +22,17 @@ package 
org.elasticsearch.action.admin.indices.shards; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.transport.DummyTransportAddress; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.NodeDisconnectedException; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.hamcrest.Matchers.equalTo; @@ -52,9 +45,9 @@ public class IndicesShardStoreResponseTests extends ESTestCase { DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT); List<IndicesShardStoresResponse.StoreStatus> storeStatusList = new ArrayList<>(); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, new IOException("corrupted"))); + storeStatusList.add(new 
IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted"))); storeStatuses.put(0, storeStatusList); storeStatuses.put(1, storeStatusList); ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storesMap = storeStatuses.build(); @@ -97,8 +90,10 @@ public class IndicesShardStoreResponseTests extends ESTestCase { IndicesShardStoresResponse.StoreStatus storeStatus = storeStatusList.get(i); assertThat(storeInfo.containsKey("version"), equalTo(true)); assertThat(((int) storeInfo.get("version")), equalTo(((int) storeStatus.getVersion()))); + assertThat(storeInfo.containsKey("allocation_id"), equalTo(true)); + assertThat(((String) storeInfo.get("allocation_id")), equalTo((storeStatus.getAllocationId()))); assertThat(storeInfo.containsKey("allocation"), equalTo(true)); - assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocation().value())); + assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value())); assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true)); if (storeStatus.getStoreException() != null) { assertThat(storeInfo.containsKey("store_exception"), equalTo(true)); @@ -112,14 +107,14 @@ public class IndicesShardStoreResponseTests extends ESTestCase { public void testStoreStatusOrdering() throws Exception { DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); List<IndicesShardStoresResponse.StoreStatus> orderedStoreStatuses = new ArrayList<>(); - orderedStoreStatuses.add(new 
IndicesShardStoresResponse.StoreStatus(node1, 2, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted"))); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); List<IndicesShardStoresResponse.StoreStatus> storeStatuses = new ArrayList<>(orderedStoreStatuses); - Collections.shuffle(storeStatuses); + Collections.shuffle(storeStatuses, random()); CollectionUtil.timSort(storeStatuses); assertThat(storeStatuses, equalTo(orderedStoreStatuses)); } diff --git 
a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java index 143300ecc0..a38a46d0de 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java @@ -65,7 +65,7 @@ public class UpgradeIT extends ESBackcompatTestCase { public void testUpgrade() throws Exception { // allow the cluster to rebalance quickly - 2 concurrent rebalance are default we can do higher Settings.Builder builder = Settings.builder(); - builder.put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 100); + builder.put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 100); client().admin().cluster().prepareUpdateSettings().setPersistentSettings(builder).get(); int numIndexes = randomIntBetween(2, 4); @@ -117,13 +117,13 @@ public class UpgradeIT extends ESBackcompatTestCase { ensureGreen(); // disable allocation entirely until all nodes are upgraded builder = Settings.builder(); - builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE); + builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE); client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get(); backwardsCluster().upgradeAllNodes(); builder = Settings.builder(); // disable rebalanceing entirely for the time being otherwise we might get relocations / rebalance from nodes with old segments - builder.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE); - builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.ALL); + 
builder.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE); + builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.ALL); client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get(); ensureGreen(); logger.info("--> Nodes upgrade complete"); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java new file mode 100644 index 0000000000..3c38e2ef0f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -0,0 +1,164 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; + +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class BulkProcessorRetryIT extends ESIntegTestCase { + private static final String INDEX_NAME = "test"; + private static final String TYPE_NAME = "type"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + //Have very low pool and queue sizes to overwhelm internal pools easily + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("threadpool.generic.size", 1) + .put("threadpool.generic.queue_size", 1) + // don't mess with this one! 
It's quite sensitive to a low queue size + // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) + //.put("threadpool.listener.queue_size", 1) + .put("threadpool.get.queue_size", 1) + // default is 50 + .put("threadpool.bulk.queue_size", 20) + .build(); + } + + + public void testBulkRejectionLoadWithoutBackoff() throws Throwable { + boolean rejectedExecutionExpected = true; + executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected); + } + + public void testBulkRejectionLoadWithBackoff() throws Throwable { + boolean rejectedExecutionExpected = false; + executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected); + } + + private void executeBulkRejectionLoad(BackoffPolicy backoffPolicy, boolean rejectedExecutionExpected) throws Throwable { + int numberOfAsyncOps = randomIntBetween(600, 700); + final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps); + final Set<Object> responses = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + assertAcked(prepareCreate(INDEX_NAME)); + ensureGreen(); + + BulkProcessor bulkProcessor = BulkProcessor.builder(client(), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + // no op + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + responses.add(response); + latch.countDown(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + responses.add(failure); + latch.countDown(); + } + }).setBulkActions(1) + // zero means that we're in the sync case, more means that we're in the async case + .setConcurrentRequests(randomIntBetween(0, 100)) + .setBackoffPolicy(backoffPolicy) + .build(); + indexDocs(bulkProcessor, numberOfAsyncOps); + latch.await(10, TimeUnit.SECONDS); + bulkProcessor.close(); + + assertThat(responses.size(), equalTo(numberOfAsyncOps)); + + // 
validate all responses + for (Object response : responses) { + if (response instanceof BulkResponse) { + BulkResponse bulkResponse = (BulkResponse) response; + for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) { + if (bulkItemResponse.isFailed()) { + BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); + Throwable rootCause = ExceptionsHelper.unwrapCause(failure.getCause()); + if (rootCause instanceof EsRejectedExecutionException) { + if (rejectedExecutionExpected == false) { + // we're not expecting that we overwhelmed it even once + throw new AssertionError("Unexpected failure reason", rootCause); + } + } else { + throw new AssertionError("Unexpected failure", rootCause); + } + } + } + } else { + Throwable t = (Throwable) response; + // we're not expecting any other errors + throw new AssertionError("Unexpected failure", t); + } + } + + client().admin().indices().refresh(new RefreshRequest()).get(); + + // validate we did not create any duplicates due to retries + Matcher<Long> searchResultCount; + if (rejectedExecutionExpected) { + // it is ok if we lost some index operations to rejected executions + searchResultCount = lessThanOrEqualTo((long) numberOfAsyncOps); + } else { + searchResultCount = equalTo((long) numberOfAsyncOps); + } + + SearchResponse results = client() + .prepareSearch(INDEX_NAME) + .setTypes(TYPE_NAME) + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(0) + .get(); + assertThat(results.getHits().totalHits(), searchResultCount); + } + + private static void indexDocs(BulkProcessor processor, int numDocs) { + for (int i = 1; i <= numDocs; i++) { + processor.add(client() + .prepareIndex() + .setIndex(INDEX_NAME) + .setType(TYPE_NAME) + .setId(Integer.toString(i)) + .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30)) + .request()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java new file mode 
100644 index 0000000000..4d73f932d2 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -0,0 +1,220 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.rest.NoOpClient; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.*; + +public class RetryTests extends ESTestCase { + // no need to wait fof a long time in tests + private static final TimeValue DELAY = TimeValue.timeValueMillis(1L); + private static final int CALLS_TO_FAIL = 5; + + private MockBulkClient bulkClient; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + 
this.bulkClient = new MockBulkClient(getTestName(), CALLS_TO_FAIL); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + this.bulkClient.close(); + } + + private BulkRequest createBulkRequest() { + BulkRequest request = new BulkRequest(); + request.add(new UpdateRequest("shop", "products", "1")); + request.add(new UpdateRequest("shop", "products", "2")); + request.add(new UpdateRequest("shop", "products", "3")); + request.add(new UpdateRequest("shop", "products", "4")); + request.add(new UpdateRequest("shop", "products", "5")); + return request; + } + + public void testSyncRetryBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); + + BulkRequest bulkRequest = createBulkRequest(); + BulkResponse response = Retry + .on(EsRejectedExecutionException.class) + .policy(backoff) + .withSyncBackoff(bulkClient, bulkRequest); + + assertFalse(response.hasFailures()); + assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); + } + + public void testSyncRetryFailsAfterBackoff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); + + BulkRequest bulkRequest = createBulkRequest(); + BulkResponse response = Retry + .on(EsRejectedExecutionException.class) + .policy(backoff) + .withSyncBackoff(bulkClient, bulkRequest); + + assertTrue(response.hasFailures()); + assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); + } + + public void testAsyncRetryBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); + AssertingListener listener = new AssertingListener(); + + BulkRequest bulkRequest = createBulkRequest(); + Retry.on(EsRejectedExecutionException.class) + .policy(backoff) + .withAsyncBackoff(bulkClient, bulkRequest, listener); + + listener.awaitCallbacksCalled(); + listener.assertOnResponseCalled(); + 
listener.assertResponseWithoutFailures(); + listener.assertResponseWithNumberOfItems(bulkRequest.numberOfActions()); + listener.assertOnFailureNeverCalled(); + } + + public void testAsyncRetryFailsAfterBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); + AssertingListener listener = new AssertingListener(); + + BulkRequest bulkRequest = createBulkRequest(); + Retry.on(EsRejectedExecutionException.class) + .policy(backoff) + .withAsyncBackoff(bulkClient, bulkRequest, listener); + + listener.awaitCallbacksCalled(); + + listener.assertOnResponseCalled(); + listener.assertResponseWithFailures(); + listener.assertResponseWithNumberOfItems(bulkRequest.numberOfActions()); + listener.assertOnFailureNeverCalled(); + } + + private static class AssertingListener implements ActionListener<BulkResponse> { + private final CountDownLatch latch; + private final AtomicInteger countOnResponseCalled = new AtomicInteger(); + private volatile Throwable lastFailure; + private volatile BulkResponse response; + + private AssertingListener() { + latch = new CountDownLatch(1); + } + + public void awaitCallbacksCalled() throws InterruptedException { + latch.await(); + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + this.response = bulkItemResponses; + countOnResponseCalled.incrementAndGet(); + latch.countDown(); + } + + @Override + public void onFailure(Throwable e) { + this.lastFailure = e; + latch.countDown(); + } + + public void assertOnResponseCalled() { + assertThat(countOnResponseCalled.get(), equalTo(1)); + } + + public void assertResponseWithNumberOfItems(int numItems) { + assertThat(response.getItems().length, equalTo(numItems)); + } + + public void assertResponseWithoutFailures() { + assertThat(response, notNullValue()); + assertFalse("Response should not have failures", response.hasFailures()); + } + + public void assertResponseWithFailures() { + assertThat(response, notNullValue()); + 
assertTrue("Response should have failures", response.hasFailures()); + } + + public void assertOnFailureNeverCalled() { + assertThat(lastFailure, nullValue()); + } + } + + private static class MockBulkClient extends NoOpClient { + private int numberOfCallsToFail; + + private MockBulkClient(String testName, int numberOfCallsToFail) { + super(testName); + this.numberOfCallsToFail = numberOfCallsToFail; + } + + @Override + public ActionFuture<BulkResponse> bulk(BulkRequest request) { + PlainActionFuture<BulkResponse> responseFuture = new PlainActionFuture<>(); + bulk(request, responseFuture); + return responseFuture; + } + + @Override + public void bulk(BulkRequest request, ActionListener<BulkResponse> listener) { + // do everything synchronously, that's fine for a test + boolean shouldFail = numberOfCallsToFail > 0; + numberOfCallsToFail--; + + BulkItemResponse[] itemResponses = new BulkItemResponse[request.requests().size()]; + // if we have to fail, we need to fail at least once "reliably", the rest can be random + int itemToFail = randomInt(request.requests().size() - 1); + for (int idx = 0; idx < request.requests().size(); idx++) { + if (shouldFail && (randomBoolean() || idx == itemToFail)) { + itemResponses[idx] = failedResponse(); + } else { + itemResponses[idx] = successfulResponse(); + } + } + listener.onResponse(new BulkResponse(itemResponses, 1000L)); + } + + private BulkItemResponse successfulResponse() { + return new BulkItemResponse(1, "update", new DeleteResponse()); + } + + private BulkItemResponse failedResponse() { + return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 825e3e4089..b0c13f851a 100644 --- 
a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -1,5 +1,24 @@ package org.elasticsearch.action.support.master; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -45,8 +64,8 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .build(); internalCluster().startMasterOnlyNodesAsync(3, sharedSettings).get(); diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 3a3876b60b..1cbe05da6a 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -865,7 +865,7 @@ public class IndexAliasesIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .addMapping("type", "field", "type=string") .addAlias(new Alias("alias1")) - .addAlias(new Alias("alias2").filter(QueryBuilders.missingQuery("field"))) + .addAlias(new Alias("alias2").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("field")))) .addAlias(new Alias("alias3").indexRouting("index").searchRouting("search"))); checkAliases(); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index fbc6cc5806..95735b8648 100644 --- 
a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -71,7 +71,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -440,25 +439,9 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get(); assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get(); assertHitCount(countResponse, 2l); - // wildcard check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("x*")).get(); - assertHitCount(countResponse, 2l); - - // object check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("obj1")).get(); - assertHitCount(countResponse, 2l); if (!backwardsCluster().upgradeOneNode()) { break; } diff --git 
a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c..bf71c0c246 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -32,8 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; @@ -62,23 +60,12 @@ import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; +import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // needs at least 2 nodes since it bumps replicas to 1 @@ -120,7 +107,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public Settings nodeSettings(int ord) { return Settings.builder() 
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) // disable merging so no segments will be upgraded - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files + .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), 30) // increase recovery speed for small files .build(); } @@ -271,7 +258,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public void testOldIndexes() throws Exception { setupCluster(); - Collections.shuffle(indexes, getRandom()); + Collections.shuffle(indexes, random()); for (String index : indexes) { long startTime = System.currentTimeMillis(); logger.info("--> Testing old index " + index); @@ -341,13 +328,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); assertEquals(numDocs, searchRsp.getHits().getTotalHits()); - - logger.info("--> testing missing filter"); - // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache - searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.missingQuery("long_sort")); - searchRsp = searchReq.get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertEquals(0, searchRsp.getHits().getTotalHits()); } void assertBasicAggregationWorks(String indexName) { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 228f1a6512..6ad05b3ff8 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -181,7 +181,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { logger.info("--> check settings"); ClusterState clusterState = 
client().admin().cluster().prepareState().get().getState(); - assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "version_attr"), equalTo(version)); + assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "version_attr"), equalTo(version)); logger.info("--> check templates"); IndexTemplateMetaData template = clusterState.getMetaData().templates().get("template_" + version.toLowerCase(Locale.ROOT)); diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index f4b29768b9..f452bb5c36 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -40,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -114,12 +114,12 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { public String description() { return "a 
mock transport service"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransportService("internal", InternalTransportService.class); + public void onModule(NetworkModule transportModule) { + transportModule.registerTransportService("internal", InternalTransportService.class); } @Override public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); + return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 5ed45620a0..8aa065548d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; @@ -126,7 +127,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() // manual collection or upon cluster forming. 
- .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, "1s") + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") .build(); } @@ -137,9 +138,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { } public void testClusterInfoServiceCollectsInformation() throws Exception { - internalCluster().startNodesAsync(2, - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "200ms").build()) - .get(); + internalCluster().startNodesAsync(2).get(); assertAcked(prepareCreate("test").setSettings(settingsBuilder() .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0) .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE).build())); @@ -147,6 +146,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); + infoService.onMaster(); ClusterInfo info = infoService.refresh(); assertNotNull("info should not be null", info); ImmutableOpenMap<String, DiskUsage> leastUsages = info.getNodeLeastAvailableDiskUsages(); @@ -188,7 +189,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException { internalCluster().startNodesAsync(2, // manually control publishing - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "60m").build()) + Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()) .get(); prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get(); ensureGreen("test"); 
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 4ca0fffbdf..48b2591559 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -31,11 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.*; import org.elasticsearch.index.settings.IndexDynamicSettings; public class ClusterModuleTests extends ModuleTestCase { @@ -73,18 +72,20 @@ public class ClusterModuleTests extends ModuleTestCase { } public void testRegisterClusterDynamicSettingDuplicate() { - ClusterModule module = new ClusterModule(Settings.EMPTY); + final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); + SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); try { - module.registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY); + module.registerSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING); } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Cannot register setting [" + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE + "] twice"); + assertEquals(e.getMessage(), "Cannot register setting [" + 
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey() + "] twice"); } } public void testRegisterClusterDynamicSetting() { - ClusterModule module = new ClusterModule(Settings.EMPTY); - module.registerClusterDynamicSetting("foo.bar", Validator.EMPTY); - assertInstanceBindingWithAnnotation(module, DynamicSettings.class, dynamicSettings -> dynamicSettings.hasDynamicSetting("foo.bar"), ClusterDynamicSettings.class); + final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); + SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); + module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER)); + assertInstanceBinding(module, ClusterSettings.class, service -> service.hasDynamicSetting("foo.bar")); } public void testRegisterIndexDynamicSettingDuplicate() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 9e842a3872..9d453ead4d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -43,17 +43,29 @@ import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; /** * @@ -751,23 +763,40 @@ public class ClusterServiceIT extends ESIntegTestCase { } } + int numberOfThreads = randomIntBetween(2, 8); + int tasksSubmittedPerThread = randomIntBetween(1, 1024); + int numberOfExecutors = Math.max(1, numberOfThreads / 4); + final Semaphore semaphore = new Semaphore(numberOfExecutors); + class TaskExecutor implements ClusterStateTaskExecutor<Task> { private AtomicInteger counter = new AtomicInteger(); + private AtomicInteger batches = new AtomicInteger(); + private AtomicInteger published = new AtomicInteger(); @Override public BatchResult<Task> execute(ClusterState currentState, List<Task> tasks) throws Exception { tasks.forEach(task -> task.execute()); counter.addAndGet(tasks.size()); - return BatchResult.<Task>builder().successes(tasks).build(currentState); + ClusterState maybeUpdatedClusterState = currentState; + if (randomBoolean()) { + maybeUpdatedClusterState = ClusterState.builder(currentState).build(); + batches.incrementAndGet(); + semaphore.acquire(); + } + return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState); } @Override public boolean runOnlyOnMaster() { return false; } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.incrementAndGet(); + semaphore.release(); + } } - int numberOfThreads = randomIntBetween(2, 8); - int tasksSubmittedPerThread = randomIntBetween(1, 1024); ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); @@ -784,7 +813,6 @@ public class ClusterServiceIT extends 
ESIntegTestCase { } }; - int numberOfExecutors = Math.max(1, numberOfThreads / 4); List<TaskExecutor> executors = new ArrayList<>(); for (int i = 0; i < numberOfExecutors; i++) { executors.add(new TaskExecutor()); @@ -830,6 +858,8 @@ public class ClusterServiceIT extends ESIntegTestCase { // wait until all the cluster state updates have been processed updateLatch.await(); + // and until all of the publication callbacks have completed + semaphore.acquire(numberOfExecutors); // assert the number of executed tasks is correct assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); @@ -838,6 +868,7 @@ public class ClusterServiceIT extends ESIntegTestCase { for (TaskExecutor executor : executors) { if (counts.containsKey(executor)) { assertEquals((int) counts.get(executor), executor.counter.get()); + assertEquals(executor.batches.get(), executor.published.get()); } } @@ -940,7 +971,7 @@ public class ClusterServiceIT extends ESIntegTestCase { public void testLongClusterStateUpdateLogging() throws Exception { Settings settings = settingsBuilder() .put("discovery.type", "local") - .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10s") + .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10s") .build(); internalCluster().startNode(settings); ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); @@ -976,7 +1007,7 @@ public class ClusterServiceIT extends ESIntegTestCase { processedFirstTask.await(1, TimeUnit.SECONDS); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms"))); + .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10ms"))); clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override diff --git 
a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 648356be17..2f1e5d33f7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -280,7 +280,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { assertNoMasterBlockOnAllNodes(); logger.info("--> bringing another node up"); - internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build()); + internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); } @@ -317,7 +317,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { // set an initial value which is at least quorum to avoid split brains during initial startup int initialMinMasterNodes = randomIntBetween(nodeCount / 2 + 1, nodeCount); - settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, initialMinMasterNodes); + settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), initialMinMasterNodes); logger.info("--> starting [{}] nodes. 
min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes); @@ -328,19 +328,21 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { int updateCount = randomIntBetween(1, nodeCount); - logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount); + logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount))); + .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); logger.info("--> verifying no node left and master is up"); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); updateCount = nodeCount + randomIntBetween(1, 2000); - logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount); - assertThat(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount)) - .get().getPersistentSettings().getAsMap().keySet(), - empty()); + logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + try { + client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" +updateCount+ "]"); + } logger.info("--> verifying no node left and master is up"); 
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); @@ -351,8 +353,8 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { .put("discovery.type", "zen") .put(FaultDetection.SETTING_PING_TIMEOUT, "1h") // disable it .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) - .put(DiscoverySettings.COMMIT_TIMEOUT, "100ms") // speed things up + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up .build(); internalCluster().startNodesAsync(3, settings).get(); ensureGreen(); // ensure cluster state is recovered before we disrupt things diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index e0f8b2cb84..8e5479d6f8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -67,7 +67,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") .put("discovery.initial_state_timeout", "500ms") - .put(DiscoverySettings.NO_MASTER_BLOCK, "all") + .put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all") .build(); TimeValue timeout = TimeValue.timeValueMillis(200); @@ -219,7 +219,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") .put("discovery.initial_state_timeout", "500ms") - .put(DiscoverySettings.NO_MASTER_BLOCK, "write") + .put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "write") .build(); internalCluster().startNode(settings); diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java 
b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index 81de8b1a43..c5e48a97df 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -50,8 +50,8 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { .put(super.nodeSettings(nodeOrdinal)) //make sure that enough concurrent reroutes can happen at the same time //we have a minimum of 2 nodes, and a maximum of 10 shards, thus 5 should be enough - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5) - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 5) + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 10) .build(); } @@ -69,7 +69,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { private void removePublishTimeout() { //to test that the acknowledgement mechanism is working we better disable the wait for publish //otherwise the operation is most likely acknowledged even if it doesn't support ack - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0"))); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0"))); } public void testClusterUpdateSettingsAcknowledgement() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 47517a753a..7d3825a14b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java 
@@ -67,8 +67,8 @@ public class AckIT extends ESIntegTestCase { //to test that the acknowledgement mechanism is working we better disable the wait for publish //otherwise the operation is most likely acknowledged even if it doesn't support ack return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoverySettings.PUBLISH_TIMEOUT, 0).build(); - } + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), 0).build(); +} public void testUpdateSettingsAcknowledgement() { createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index f9151628b8..726590104f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -106,9 +106,9 @@ public class AwarenessAllocationIT extends ESIntegTestCase { public void testAwarenessZones() throws Exception { Settings commonSettings = Settings.settingsBuilder() - .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "zone.values", "a,b") - .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, "zone") - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3) + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 3) .put(ZenDiscovery.SETTING_JOIN_TIMEOUT, "10s") .build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 1605e70637..b85c17097f 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -56,7 +56,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; @@ -71,15 +71,15 @@ public class ClusterRerouteIT extends ESIntegTestCase { public void testRerouteWithCommands_disableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build(); rerouteWithCommands(commonSettings); } public void testRerouteWithCommands_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); rerouteWithCommands(commonSettings); } @@ -147,15 +147,15 @@ public class ClusterRerouteIT extends ESIntegTestCase { 
public void testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build(); rerouteWithAllocateLocalGateway(commonSettings); } public void testRerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); rerouteWithAllocateLocalGateway(commonSettings); } @@ -279,7 +279,7 @@ public class ClusterRerouteIT extends ESIntegTestCase { logger.info("--> disable allocation"); Settings newSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java index ef54814019..9a91e1cd56 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -29,11 +29,7 @@ import org.junit.BeforeClass; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static 
java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -246,7 +242,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase { private Settings shuffleSettings(Settings source) { Settings.Builder settings = Settings.settingsBuilder(); List<String> keys = new ArrayList<>(source.getAsMap().keySet()); - Collections.shuffle(keys, getRandom()); + Collections.shuffle(keys, random()); for (String o : keys) { settings.put(o, source.getAsMap().get(o)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index c236ea5487..2d704380ae 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -170,7 +170,7 @@ public class DelayedAllocationIT extends ESIntegTestCase { private String findNodeWithShard() { ClusterState state = client().admin().cluster().prepareState().get().getState(); List<ShardRouting> startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED); - Collections.shuffle(startedShards, getRandom()); + Collections.shuffle(startedShards,random()); return state.nodes().get(startedShards.get(0).currentNodeId()).getName(); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java new file mode 100644 index 0000000000..340fdcc3c9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -0,0 +1,122 @@ +package org.elasticsearch.cluster.routing; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.disruption.NetworkDisconnectPartition; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +@ESIntegTestCase.SuppressLocalMode +public class PrimaryAllocationIT extends ESIntegTestCase { + + public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { + logger.info("--> starting 3 nodes, 1 master, 2 data"); + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNodesAsync(2).get(); + + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put("index.number_of_shards", 
1).put("index.number_of_replicas", 1)).get()); + ensureGreen(); + logger.info("--> indexing..."); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + refresh(); + + ClusterState state = client().admin().cluster().prepareState().all().get().getState(); + List<ShardRouting> shards = state.routingTable().allShards("test"); + assertThat(shards.size(), equalTo(2)); + + final String primaryNode; + final String replicaNode; + if (shards.get(0).primary()) { + primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name(); + replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name(); + } else { + primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name(); + replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name(); + } + + NetworkDisconnectPartition partition = new NetworkDisconnectPartition( + new HashSet<>(Arrays.asList(master, replicaNode)), Collections.singleton(primaryNode), random()); + internalCluster().setDisruptionScheme(partition); + logger.info("--> partitioning node with primary shard from rest of cluster"); + partition.startDisrupting(); + + ensureStableCluster(2, master); + + logger.info("--> index a document into previous replica shard (that is now primary)"); + client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + + logger.info("--> shut down node that has new acknowledged document"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + + ensureStableCluster(1, master); + + partition.stopDisrupting(); + + logger.info("--> waiting for node with old primary shard to rejoin the cluster"); + ensureStableCluster(2, master); + + logger.info("--> check that old primary shard does not get promoted to primary again"); + // kick reroute and wait for all shard states to be 
fetched + client(master).admin().cluster().prepareReroute().get(); + assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0))); + // kick reroute a second time and check that all shards are unassigned + assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + + logger.info("--> starting node that reuses data folder with the up-to-date primary shard"); + internalCluster().startDataOnlyNode(Settings.EMPTY); + + logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available"); + ensureYellow("test"); + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); + } + + public void testNotWaitForQuorumCopies() throws Exception { + logger.info("--> starting 3 nodes"); + internalCluster().startNodesAsync(3).get(); + logger.info("--> creating index with 1 primary and 2 replicas"); + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get()); + ensureGreen("test"); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + logger.info("--> removing 2 nodes from cluster"); + internalCluster().stopRandomDataNode(); + internalCluster().stopRandomDataNode(); + internalCluster().fullRestart(); + logger.info("--> checking that index still gets allocated with only 1 shard copy being available"); + ensureYellow("test"); + assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 1l); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java index 7a7f4722e9..2e54512b95 100644 --- 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java @@ -1,5 +1,24 @@ package org.elasticsearch.cluster.routing.allocation; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index d10912e69d..91ba1f4999 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -51,7 +50,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testAddNodesAndIndices() { Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); AllocationService service = createAllocationService(settings.build()); ClusterState clusterState = initCluster(service, 1, 3, 3, 1); @@ -94,7 +93,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testMinimalRelocations() { Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), 
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) .put("cluster.routing.allocation.node_concurrent_recoveries", 2); AllocationService service = createAllocationService(settings.build()); @@ -162,7 +161,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { public void testMinimalRelocationsNoLimit() { Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()) .put("cluster.routing.allocation.node_concurrent_recoveries", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100); AllocationService service = createAllocationService(settings.build()); @@ -388,7 +387,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("Removing [{}] nodes", numNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); ArrayList<DiscoveryNode> discoveryNodes = CollectionUtils.iterableAsArrayList(clusterState.nodes()); - Collections.shuffle(discoveryNodes, getRandom()); + Collections.shuffle(discoveryNodes, random()); for (DiscoveryNode node : discoveryNodes) { nodes.remove(node.id()); numNodes--; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 6ac2b7df9c..1cf5ba0083 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -98,8 +98,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase { public void 
testAllocateCommand() { AllocationService allocation = createAllocationService(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build()); logger.info("--> building initial routing table"); @@ -186,8 +186,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase { public void testCancelCommand() { AllocationService allocation = createAllocationService(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build()); logger.info("--> building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java index d7a049d1b9..8d510e7f0c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -39,8 +39,8 @@ public class AllocationPriorityTests extends ESAllocationTestCase { public void testPrioritizedIndicesAllocatedFirst() { AllocationService allocation = createAllocationService(settingsBuilder(). 
put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, 1) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 1) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 1).build()); + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 1).build()); final String highPriorityName; final String lowPriorityName; final int priorityFirst; diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index 7be6037cf7..e9d0f75b1c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -55,7 +55,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded1() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -123,7 +123,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded2() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -193,7 +193,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.awareness.attributes", "rack_id") .put("cluster.routing.allocation.balance.index", 0.0f) @@ -293,7 +293,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -387,7 +387,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded5() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -465,7 +465,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testMoveShardOnceNewNodeWithAttributeAdded6() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -545,7 +545,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testFullAwareness1() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -612,7 +612,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testFullAwareness2() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2") .put("cluster.routing.allocation.awareness.attributes", "rack_id") .build()); @@ -681,7 +681,7 
@@ public class AwarenessAllocationTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2") .put("cluster.routing.allocation.awareness.attributes", "rack_id") @@ -767,7 +767,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { .put("cluster.routing.allocation.awareness.attributes", "zone") .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -828,7 +828,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { public void testUnassignedShardsWithUnbalancedZones() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "zone") .build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 1092b2ede1..e622036e13 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -37,10 +37,10 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.hamcrest.Matchers; @@ -65,10 +65,10 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { final float balanceTreshold = 1.0f; Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); + 
settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); @@ -90,10 +90,10 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { final float balanceTreshold = 1.0f; Settings.Builder settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold); AllocationService strategy = createAllocationService(settings.build()); @@ -279,36 +279,30 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { public void testPersistedSettings() { Settings.Builder settings = settingsBuilder(); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0); - final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1]; - NodeSettingsService service = new NodeSettingsService(settingsBuilder().build()) { - - @Override - public void addListener(Listener listener) { - 
assertNull("addListener was called twice while only one time was expected", listeners[0]); - listeners[0] = listener; - } - - }; + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0); + ClusterSettings service = new ClusterSettings(settingsBuilder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f)); settings = settingsBuilder(); - settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); - listeners[0].onRefreshSettings(settings.build()); + settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0); + settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()); + service.applySettings(settings.build()); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f)); assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f)); settings = settingsBuilder(); - settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5); - settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1); - settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0); - listeners[0].onRefreshSettings(settings.build()); + 
settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5); + settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.1); + settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 3.0); + service.applySettings(settings.build()); assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f)); assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f)); assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f)); @@ -317,7 +311,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { public void testNoRebalanceOnPrimaryOverload() { Settings.Builder settings = settingsBuilder(); AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(), - new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(), + new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), new ShardsAllocators(settings.build(), NoopGatewayAllocator.INSTANCE, new ShardsAllocator() { @Override diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index 8dad41db2f..15a6ea0a5f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -46,7 +46,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class); public void testAlways() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = 
createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() @@ -132,7 +132,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { public void testClusterPrimariesActive1() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -236,7 +236,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterPrimariesActive2() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -320,7 +320,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterAllActive1() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), 
ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -443,7 +443,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterAllActive2() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -527,7 +527,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { } public void testClusterAllActive3() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build()); MetaData metaData = MetaData.builder() @@ -737,7 +737,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { public void testRebalanceWhileShardFetching() { final AtomicBoolean hasFetches = new AtomicBoolean(true); - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new NoopGatewayAllocator() { @Override public boolean 
allocateUnassigned(RoutingAllocation allocation) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index e16e7cc2ce..d807dc1b5c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -46,7 +46,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { public void testSimpleDeadNodeOnStartedPrimaryShard() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); @@ -97,7 +97,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { public void testDeadNodeWhileRelocatingOnToNode() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); @@ -171,7 +171,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { public void testDeadNodeWhileRelocatingOnFromNode() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b242d8676..affab78521 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -41,7 +41,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class); public void testSimpleFailedNodeTest() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 9bfaf7e999..8dffacaa37 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -57,7 +57,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFailedShardPrimaryRelocatingToAndFrom() { AllocationService allocation = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("--> building initial routing table"); @@ -145,7 +145,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFailPrimaryStartedCheckReplicaElected() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -226,7 +226,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testFirstAllocationFailureSingleNode() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -282,7 +282,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testSingleShardMultipleAllocationFailures() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -338,7 +338,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void 
testFirstAllocationFailureTwoNodes() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); @@ -398,7 +398,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { public void testRebalanceFailure() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); logger.info("Building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index aa6fdef828..d5f8134d95 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -48,7 +48,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ 
-178,7 +178,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -340,7 +340,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 62501cbf9f..7262de2b29 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -39,15 +39,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; -import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static 
org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.elasticsearch.cluster.routing.ShardRoutingState.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; /** * @@ -58,7 +53,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testDoNotAllocateFromPrimary() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -172,7 +167,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testRandom() { AllocationService service = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -199,7 +194,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); int numNodes = between(1, 20); if (nodes.size() > numNodes) { - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); nodes = nodes.subList(0, numNodes); } else { for (int j = nodes.size(); j < numNodes; 
j++) { @@ -221,7 +216,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { public void testRollingRestart() { AllocationService service = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index fbc742573e..18725a0de7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -57,7 +57,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), new ClusterInfoService() { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index eca2a227f8..eec1b48be9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -47,7 +47,7 @@ 
public class RoutingNodesIntegrityTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -119,7 +119,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); @@ -211,7 +211,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 1) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build()); logger.info("Building initial routing table"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index f096ab0b13..e1586c433a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -41,7 +41,7 @@ public class ShardVersioningTests extends ESAllocationTestCase { private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class); public void testSimple() { - AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, + AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build()); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 11d41a6a33..c0f0c0c225 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -90,7 +90,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { public void testClusterLevelShardsLimitAllocate() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 1) + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1) .build()); logger.info("Building initial routing table"); @@ -126,7 +126,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { // Bump the 
cluster total shards to 2 strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 2) + .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2) .build()); logger.info("Do another reroute, make sure shards are now allocated"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index ed44b84a88..29ef451324 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -211,7 +211,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { public void testMultiIndexEvenDistribution() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -323,7 +323,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { public void testMultiIndexUnevenNodes() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index 671cce007c..aec81a6e06 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -50,7 +50,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.balance.index", 0.0f) .put("cluster.routing.allocation.balance.replica", 1.0f) diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index a739f30856..5377d09d4b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -60,9 +60,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThreshold() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build(); ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10)); // 90% used @@ -96,7 +96,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { }; AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -170,9 +170,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 70 instead of 80 // node2 now should not have new shards allocated to it, but shards can remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.7).build(); deciders = new AllocationDeciders(Settings.EMPTY, new 
HashSet<>(Arrays.asList( @@ -181,7 +181,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -201,9 +201,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 60 instead of 70 // node2 now should not have new shards allocated to it, and shards cannot remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.5) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.6).build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -212,7 +212,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -254,9 +254,9 @@ public 
class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThresholdWithAbsoluteSizes() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "9b").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "30b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "9b").build(); ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10)); // 90% used @@ -292,7 +292,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -349,7 +349,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { }; strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -405,9 +405,9 @@ 
public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 70 instead of 80 // node2 now should not have new shards allocated to it, but shards can remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "40b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "30b").build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -416,7 +416,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -436,9 +436,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 60 instead of 70 // node2 now should not have new shards allocated to it, and shards cannot remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build(); + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "50b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "40b").build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -447,7 +447,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -522,9 +522,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThresholdWithShardSizes() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "71%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "71%").build(); ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31)); // 69% used @@ -556,7 +556,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() 
.put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -589,9 +589,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testUnknownDiskUsage() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.85).build(); ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50)); // 50% used @@ -624,7 +624,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -688,10 +688,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testShardRelocationsTakenIntoAccount() { Settings 
diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build(); ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used @@ -727,7 +727,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -794,10 +794,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testCanRemainWithShardRelocatingAway() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build(); // We have an index with 2 primary shards each taking 40 bytes. Each node has 100 bytes available ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); @@ -889,7 +889,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { ))); AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); // Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away @@ -906,10 +906,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testForSingleDataNode() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build(); ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used @@ -989,7 +989,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index a386883ad1..52e88ea3bc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -28,12 +28,12 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.collect.ImmutableOpenMap; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -45,7 +45,7 @@ import static org.hamcrest.CoreMatchers.equalTo; */ public class DiskThresholdDeciderUnitTests extends ESTestCase { public void testDynamicSettings() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -59,18 +59,15 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { assertTrue(decider.isEnabled()); assertTrue(decider.isIncludeRelocations()); - DiskThresholdDecider.ApplySettings applySettings = decider.newApplySettings(); - Settings newSettings = Settings.builder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, false) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "500mb") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "30s") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), false) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "500mb") + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "30s") .build(); - applySettings.onRefreshSettings(newSettings); - + nss.applySettings(newSettings); assertThat("high threshold bytes should be unset", decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test"))); assertThat("high threshold percentage should be changed", @@ -86,7 +83,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanAllocateUsesMaxAvailableSpace() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -127,7 +124,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanRemainUsesLeastAvailableSpace() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); ImmutableOpenMap.Builder<ShardRouting, String> shardRoutingMap = ImmutableOpenMap.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java index 940634a465..be64aafc61 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java @@ -37,7 +37,7 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase { public void testEnableRebalance() 
throws InterruptedException { final String firstNode = internalCluster().startNode(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); // we test with 2 shards since otherwise it's pretty fragile if there are difference in the num or shards such that // all shards are relocated to the second node which is not what we want here. It's solely a test for the settings to take effect final int numShards = 2; @@ -64,7 +64,7 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase { assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2)); // flip the cluster wide setting such that we can also balance for index test_1 eventually we should have one shard of each index on each node - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? 
EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get(); logger.info("--> balance index [test_1]"); client().admin().cluster().prepareReroute().get(); ensureGreen("test_1"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 0049a12077..1bdc39036a 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -32,10 +31,10 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import java.util.EnumSet; @@ -44,8 +43,8 @@ import java.util.List; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static 
org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.equalTo; @@ -58,7 +57,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testClusterEnableNone() { AllocationService strategy = createAllocationService(settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build()); logger.info("Building initial routing table"); @@ -86,7 +85,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testClusterEnableOnlyPrimaries() { AllocationService strategy = createAllocationService(settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.PRIMARIES.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.PRIMARIES.name()) .build()); logger.info("Building initial routing table"); @@ -159,11 +158,11 @@ public class EnableAllocationTests extends ESAllocationTestCase { final boolean useClusterSetting = randomBoolean(); final Rebalance allowedOnes = RandomPicks.randomFrom(getRandom(), EnumSet.of(Rebalance.PRIMARIES, Rebalance.REPLICAS, Rebalance.ALL)); Settings build = settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, 
useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - NodeSettingsService nodeSettingsService = new NodeSettingsService(build); - AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom()); + ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationService strategy = createAllocationService(build, clusterSettings, getRandom()); Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -213,7 +212,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { if (useClusterSetting) { prevState = clusterState; clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, allowedOnes) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes) .build())).build(); } else { prevState = clusterState; @@ -224,7 +223,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { .build(); } - nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState)); + clusterSettings.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); @@ -261,11 +260,11 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testEnableClusterBalanceNoReplicas() { final boolean useClusterSetting = randomBoolean(); Settings build = settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - NodeSettingsService nodeSettingsService = new NodeSettingsService(build); - AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom()); + ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationService strategy = createAllocationService(build, clusterSettings, getRandom()); Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -307,7 +306,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { if (useClusterSetting) { prevState = clusterState; clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? 
Rebalance.PRIMARIES : Rebalance.ALL) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL) .build())).build(); } else { prevState = clusterState; @@ -315,7 +314,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices() .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL).build()))).build(); } - nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState)); + clusterSettings.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 126799f593..a17017f630 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -47,21 +48,11 @@ import static 
org.hamcrest.Matchers.greaterThanOrEqualTo; public class MockDiskUsagesIT extends ESIntegTestCase { @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - // Update more frequently - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "1s") - .build(); - } - - @Override protected Collection<Class<? extends Plugin>> nodePlugins() { // Use the mock internal cluster info service, which has fake-able disk usages return pluginList(MockInternalClusterInfoService.TestPlugin.class); } - //@TestLogging("org.elasticsearch.cluster:TRACE,org.elasticsearch.cluster.routing.allocation.decider:TRACE") public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { List<String> nodes = internalCluster().startNodesAsync(3).get(); @@ -77,15 +68,16 @@ public class MockDiskUsagesIT extends ESIntegTestCase { // Start with all nodes at 50% usage final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); + cis.setUpdateFrequency(TimeValue.timeValueMillis(200)); + cis.onMaster(); cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", "/dev/null", 100, 50)); cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", "/dev/null", 100, 50)); client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, randomFrom("20b", "80%")) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, randomFrom("10b", "90%")) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "1ms")).get(); - + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), randomFrom("20b", "80%")) + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), randomFrom("10b", "90%")) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); // Create an index with 10 shards so we can check allocation for it prepareCreate("test").setSettings(settingsBuilder() .put("number_of_shards", 10) diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 65d5b0b9fc..fb8a8e28b3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.settings; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.ClusterName; @@ -32,8 +33,8 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.hamcrest.Matchers; +import static org.elasticsearch.common.inject.matcher.Matchers.not; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -48,22 +49,142 @@ public class ClusterSettingsIT extends ESIntegTestCase { public void testClusterNonExistingSettingsUpdate() { String key1 = "no_idea_what_you_are_talking_about"; int value1 = 10; + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(key1, value1).build()) + .get(); + fail("bogus 
value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "transient setting [no_idea_what_you_are_talking_about], not dynamically updateable"); + } + } + + public void testDeleteIsAppliedFirst() { + DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); + + assertEquals(discoverySettings.getPublishTimeout(), DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY)); + assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)); ClusterUpdateSettingsResponse response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + assertEquals(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "1s"); + assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)); + assertFalse(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? 
"discovery.zen.*" : "*")).put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "2s")) + .get(); + assertEquals(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "2s"); + assertNull(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); + } + + public void testResetClusterSetting() { + DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); + + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + ClusterUpdateSettingsResponse response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(key1, value1).build()) + .setTransientSettings(Settings.builder().putNull(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())) .get(); assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), 
equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build()) + .get(); + + assertAcked(response); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertFalse(discoverySettings.getPublishDiff()); + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? "discovery.zen.*" : "*"))) + .get(); + + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + // now persistent + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + 
.setPersistentSettings(Settings.builder().putNull((DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()))) + .get(); + + assertAcked(response); + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build()) + .get(); + + assertAcked(response); + assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertFalse(discoverySettings.getPublishDiff()); + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull((randomBoolean() ? 
"discovery.zen.*" : "*"))) + .get(); + + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); } public void testClusterSettingsUpdateResponse() { - String key1 = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC; + String key1 = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(); int value1 = 10; - String key2 = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; - boolean value2 = false; + String key2 = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + String value2 = EnableAllocationDecider.Allocation.NONE.name(); Settings transientSettings1 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).build(); Settings persistentSettings1 = Settings.builder().put(key2, value2).build(); @@ -114,43 +235,59 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertThat(response3.getPersistentSettings().get(key2), notNullValue()); } + public void testCanUpdateTracerSettings() { + ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putArray("transport.tracer.include", "internal:index/shard/recovery/*", + "internal:gateway/local*")) + .get(); + assertArrayEquals(clusterUpdateSettingsResponse.getTransientSettings().getAsArray("transport.tracer.include"), new String[] {"internal:index/shard/recovery/*", + "internal:gateway/local*"}); + } + public void testUpdateDiscoveryPublishTimeout() { DiscoverySettings discoverySettings = 
internalCluster().getInstance(DiscoverySettings.class); - assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.DEFAULT_PUBLISH_TIMEOUT)); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); ClusterUpdateSettingsResponse response = client().admin().cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "1s").build()) + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) .get(); assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT), equalTo("1s")); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); - response = client().admin().cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "whatever").build()) - .get(); + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "whatever").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.commit_timeout] with value [whatever] as a time value: unit is missing or unrecognized"); + } - assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); - response = client().admin().cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, -1).build()) - .get(); + try { + client().admin().cluster() + .prepareUpdateSettings() + 
.setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), -1).build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s"); + } - assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); } public void testClusterUpdateSettingsWithBlocks() { String key1 = "cluster.routing.allocation.enable"; - Settings transientSettings = Settings.builder().put(key1, false).build(); + Settings transientSettings = Settings.builder().put(key1, EnableAllocationDecider.Allocation.NONE.name()).build(); String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; Settings persistentSettings = Settings.builder().put(key2, "5").build(); @@ -165,7 +302,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK); // But it's possible to update the settings to update the "cluster.blocks.read_only" setting - Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, false).build(); + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); } finally { diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index c5a695d16e..6094d49234 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -225,7 +225,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testAttributePreferenceRouting() { 
AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone") .build()); @@ -280,7 +280,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testNodeSelectorRouting(){ AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java index fa4ce357a5..bb9d23db1c 100644 --- a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java +++ b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.breaker; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.concurrent.atomic.AtomicBoolean; @@ -87,7 +87,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final 
AtomicReference<Throwable> lastException = new AtomicReference<>(null); final AtomicReference<ChildMemoryCircuitBreaker> breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { @Override public CircuitBreaker getBreaker(String name) { @@ -147,7 +147,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicInteger parentTripped = new AtomicInteger(0); final AtomicReference<ChildMemoryCircuitBreaker> breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { @Override public CircuitBreaker getBreaker(String name) { diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index f15a731e86..279e31aadd 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -23,14 +23,20 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import 
org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> extends ESTestCase { @@ -47,6 +53,12 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); } } @@ -63,7 +75,7 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte /** * mutate the given shape so the returned shape is different */ - protected abstract SB mutate(SB original) throws IOException; + protected abstract SB createMutation(SB original) throws IOException; /** * Test that creates new shape from a random test shape and checks both for equality @@ -89,19 +101,21 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> 
exte /** * Test serialization and deserialization of the test shape. */ + @SuppressWarnings("unchecked") public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB testShape = createTestShapeBuilder(); - SB deserializedShape = copyShape(testShape); - assertEquals(deserializedShape, testShape); - assertEquals(deserializedShape.hashCode(), testShape.hashCode()); - assertNotSame(deserializedShape, testShape); + SB deserializedShape = (SB) copyShape(testShape); + assertEquals(testShape, deserializedShape); + assertEquals(testShape.hashCode(), deserializedShape.hashCode()); + assertNotSame(testShape, deserializedShape); } } /** * Test equality and hashCode properties */ + @SuppressWarnings("unchecked") public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB firstShape = createTestShapeBuilder(); @@ -110,15 +124,15 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte assertTrue("shape is not equal to self", firstShape.equals(firstShape)); assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(), equalTo(firstShape.hashCode())); - assertThat("different shapes should not be equal", mutate(firstShape), not(equalTo(firstShape))); + assertThat("different shapes should not be equal", createMutation(firstShape), not(equalTo(firstShape))); - SB secondShape = copyShape(firstShape); + SB secondShape = (SB) copyShape(firstShape); assertTrue("shape is not equal to self", secondShape.equals(secondShape)); assertTrue("shape is not equal to its copy", firstShape.equals(secondShape)); assertTrue("equals is not symmetric", secondShape.equals(firstShape)); assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(firstShape.hashCode())); - SB thirdShape = copyShape(secondShape); + SB thirdShape = (SB) copyShape(secondShape); assertTrue("shape 
is not equal to self", thirdShape.equals(thirdShape)); assertTrue("shape is not equal to its copy", secondShape.equals(thirdShape)); assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(thirdShape.hashCode())); @@ -129,14 +143,12 @@ public abstract class AbstractShapeBuilderTestCase<SB extends ShapeBuilder> exte } } - protected SB copyShape(SB original) throws IOException { + static ShapeBuilder copyShape(ShapeBuilder original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { ShapeBuilder prototype = (ShapeBuilder) namedWriteableRegistry.getPrototype(ShapeBuilder.class, original.getWriteableName()); - @SuppressWarnings("unchecked") - SB copy = (SB) prototype.readFrom(in); - return copy; + return prototype.readFrom(in); } } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 6b102b87b2..1db9da428a 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -25,20 +25,20 @@ import org.elasticsearch.common.unit.DistanceUnit; import java.io.IOException; -public class CirlceBuilderTests extends AbstractShapeBuilderTestCase<CircleBuilder> { +public class CircleBuilderTests extends AbstractShapeBuilderTestCase<CircleBuilder> { @Override protected CircleBuilder createTestShapeBuilder() { - double centerX = randomDoubleBetween(-180, 180, false); - double centerY = randomDoubleBetween(-90, 90, false); - return new CircleBuilder() - .center(new Coordinate(centerX, centerY)) - .radius(randomDoubleBetween(0.1, 10.0, false), randomFrom(DistanceUnit.values())); + return createRandomShape(); } @Override - 
protected CircleBuilder mutate(CircleBuilder original) throws IOException { - CircleBuilder mutation = copyShape(original); + protected CircleBuilder createMutation(CircleBuilder original) throws IOException { + return mutate(original); + } + + static CircleBuilder mutate(CircleBuilder original) throws IOException { + CircleBuilder mutation = (CircleBuilder) copyShape(original); double radius = original.radius(); DistanceUnit unit = original.unit(); @@ -55,4 +55,12 @@ public class CirlceBuilderTests extends AbstractShapeBuilderTestCase<CircleBuild } return mutation.radius(radius, unit); } + + static CircleBuilder createRandomShape() { + double centerX = randomDoubleBetween(-180, 180, false); + double centerY = randomDoubleBetween(-90, 90, false); + return new CircleBuilder() + .center(new Coordinate(centerX, centerY)) + .radius(randomDoubleBetween(0.1, 10.0, false), randomFrom(DistanceUnit.values())); + } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index e6f3db2f8a..2015f2b0bc 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Rectangle; import com.vividsolutions.jts.geom.Coordinate; -import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; import org.elasticsearch.test.geo.RandomShapeGenerator; import java.io.IOException; @@ -31,36 +30,39 @@ public class EnvelopeBuilderTests extends AbstractShapeBuilderTestCase<EnvelopeB @Override protected EnvelopeBuilder createTestShapeBuilder() { - EnvelopeBuilder envelope = new EnvelopeBuilder(randomFrom(Orientation.values())); - Rectangle box = RandomShapeGenerator.xRandomRectangle(getRandom(), RandomShapeGenerator.xRandomPoint(getRandom())); - 
envelope.topLeft(box.getMinX(), box.getMaxY()) - .bottomRight(box.getMaxX(), box.getMinY()); - return envelope; + return createRandomShape(); } @Override - protected EnvelopeBuilder mutate(EnvelopeBuilder original) throws IOException { - EnvelopeBuilder mutation = copyShape(original); - if (randomBoolean()) { - // toggle orientation - mutation.orientation = (original.orientation == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT); - } else { - // move one corner to the middle of original - switch (randomIntBetween(0, 3)) { - case 0: - mutation.topLeft(new Coordinate(randomDoubleBetween(-180.0, original.bottomRight.x, true), original.topLeft.y)); - break; - case 1: - mutation.topLeft(new Coordinate(original.topLeft.x, randomDoubleBetween(original.bottomRight.y, 90.0, true))); - break; - case 2: - mutation.bottomRight(new Coordinate(randomDoubleBetween(original.topLeft.x, 180.0, true), original.bottomRight.y)); - break; - case 3: - mutation.bottomRight(new Coordinate(original.bottomRight.x, randomDoubleBetween(-90.0, original.topLeft.y, true))); - break; - } + protected EnvelopeBuilder createMutation(EnvelopeBuilder original) throws IOException { + return mutate(original); + } + + static EnvelopeBuilder mutate(EnvelopeBuilder original) throws IOException { + EnvelopeBuilder mutation = (EnvelopeBuilder) copyShape(original); + // move one corner to the middle of original + switch (randomIntBetween(0, 3)) { + case 0: + mutation.topLeft(new Coordinate(randomDoubleBetween(-180.0, original.bottomRight().x, true), original.topLeft().y)); + break; + case 1: + mutation.topLeft(new Coordinate(original.topLeft().x, randomDoubleBetween(original.bottomRight().y, 90.0, true))); + break; + case 2: + mutation.bottomRight(new Coordinate(randomDoubleBetween(original.topLeft().x, 180.0, true), original.bottomRight().y)); + break; + case 3: + mutation.bottomRight(new Coordinate(original.bottomRight().x, randomDoubleBetween(-90.0, original.topLeft().y, true))); + break; } return 
mutation; } + + static EnvelopeBuilder createRandomShape() { + EnvelopeBuilder envelope = new EnvelopeBuilder(); + Rectangle box = RandomShapeGenerator.xRandomRectangle(getRandom(), RandomShapeGenerator.xRandomPoint(getRandom())); + envelope.topLeft(box.getMinX(), box.getMaxY()) + .bottomRight(box.getMaxX(), box.getMinY()); + return envelope; + } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java new file mode 100644 index 0000000000..e53715b0ec --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilderTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.test.geo.RandomShapeGenerator; + +import java.io.IOException; + +public class GeometryCollectionBuilderTests extends AbstractShapeBuilderTestCase<GeometryCollectionBuilder> { + + @Override + protected GeometryCollectionBuilder createTestShapeBuilder() { + GeometryCollectionBuilder geometryCollection = new GeometryCollectionBuilder(); + int shapes = randomIntBetween(0, 8); + for (int i = 0; i < shapes; i++) { + switch (randomIntBetween(0, 7)) { + case 0: + geometryCollection.shape(PointBuilderTests.createRandomShape()); + break; + case 1: + geometryCollection.shape(CircleBuilderTests.createRandomShape()); + break; + case 2: + geometryCollection.shape(EnvelopeBuilderTests.createRandomShape()); + break; + case 3: + geometryCollection.shape(LineStringBuilderTests.createRandomShape()); + break; + case 4: + geometryCollection.shape(MultiLineStringBuilderTests.createRandomShape()); + break; + case 5: + geometryCollection.shape(MultiPolygonBuilderTests.createRandomShape()); + break; + case 6: + geometryCollection.shape(MultiPointBuilderTests.createRandomShape()); + break; + case 7: + geometryCollection.shape(PolygonBuilderTests.createRandomShape()); + break; + } + } + return geometryCollection; + } + + @Override + protected GeometryCollectionBuilder createMutation(GeometryCollectionBuilder original) throws IOException { + return mutate(original); + } + + static GeometryCollectionBuilder mutate(GeometryCollectionBuilder original) throws IOException { + GeometryCollectionBuilder mutation = (GeometryCollectionBuilder) copyShape(original); + if (mutation.shapes.size() > 0) { + int shapePosition = randomIntBetween(0, mutation.shapes.size() - 1); + ShapeBuilder shapeToChange = mutation.shapes.get(shapePosition); + switch (shapeToChange.type()) { + case POINT: + shapeToChange = PointBuilderTests.mutate((PointBuilder) shapeToChange); + break; + case CIRCLE: + shapeToChange = 
CircleBuilderTests.mutate((CircleBuilder) shapeToChange); + break; + case ENVELOPE: + shapeToChange = EnvelopeBuilderTests.mutate((EnvelopeBuilder) shapeToChange); + break; + case LINESTRING: + shapeToChange = LineStringBuilderTests.mutate((LineStringBuilder) shapeToChange); + break; + case MULTILINESTRING: + shapeToChange = MultiLineStringBuilderTests.mutate((MultiLineStringBuilder) shapeToChange); + break; + case MULTIPOLYGON: + shapeToChange = MultiPolygonBuilderTests.mutate((MultiPolygonBuilder) shapeToChange); + break; + case MULTIPOINT: + shapeToChange = MultiPointBuilderTests.mutate((MultiPointBuilder) shapeToChange); + break; + case POLYGON: + shapeToChange = PolygonBuilderTests.mutate((PolygonBuilder) shapeToChange); + break; + case GEOMETRYCOLLECTION: + throw new UnsupportedOperationException("GeometryCollection should not be nested inside each other"); + } + mutation.shapes.set(shapePosition, shapeToChange); + } else { + mutation.shape(RandomShapeGenerator.createShape(getRandom())); + } + return mutation; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java new file mode 100644 index 0000000000..53e30cc5a8 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class LineStringBuilderTests extends AbstractShapeBuilderTestCase<LineStringBuilder> { + + @Override + protected LineStringBuilder createTestShapeBuilder() { + return createRandomShape(); + } + + @Override + protected LineStringBuilder createMutation(LineStringBuilder original) throws IOException { + return mutate(original); + } + + static LineStringBuilder mutate(LineStringBuilder original) throws IOException { + LineStringBuilder mutation = (LineStringBuilder) copyShape(original); + Coordinate[] coordinates = original.coordinates(false); + Coordinate coordinate = randomFrom(coordinates); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + return mutation.points(coordinates); + } + + static LineStringBuilder createRandomShape() { + LineStringBuilder lsb = (LineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.LINESTRING); + if (randomBoolean()) { + lsb.close(); + } + return lsb; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java 
b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java new file mode 100644 index 0000000000..5d0ad7ed13 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase<MultiLineStringBuilder> { + + @Override + protected MultiLineStringBuilder createTestShapeBuilder() { + return createRandomShape(); + } + + @Override + protected MultiLineStringBuilder createMutation(MultiLineStringBuilder original) throws IOException { + return mutate(original); + } + + static MultiLineStringBuilder mutate(MultiLineStringBuilder original) throws IOException { + MultiLineStringBuilder mutation = (MultiLineStringBuilder) copyShape(original); + Coordinate[][] coordinates = mutation.coordinates(); + int lineToChange = randomInt(coordinates.length - 1); + for (int i = 0; i < coordinates.length; i++) { + Coordinate[] line = coordinates[i]; + if (i == lineToChange) { + Coordinate coordinate = randomFrom(line); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + } + } + return mutation; + } + + static MultiLineStringBuilder createRandomShape() { + return (MultiLineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTILINESTRING); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java new file mode 100644 index 0000000000..fca76e2e97 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase<MultiPointBuilder> { + + @Override + protected MultiPointBuilder createTestShapeBuilder() { + return createRandomShape(); + } + + @Override + protected MultiPointBuilder createMutation(MultiPointBuilder original) throws IOException { + return mutate(original); + } + + static MultiPointBuilder mutate(MultiPointBuilder original) throws IOException { + MultiPointBuilder mutation = (MultiPointBuilder) copyShape(original); + Coordinate[] coordinates = original.coordinates(false); + Coordinate coordinate = randomFrom(coordinates); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + return mutation.points(coordinates); + } + + static 
MultiPointBuilder createRandomShape() { + return (MultiPointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTIPOINT); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java new file mode 100644 index 0000000000..702114a2cb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCase<MultiPolygonBuilder> { + + @Override + protected MultiPolygonBuilder createTestShapeBuilder() { + return createRandomShape(); + } + + @Override + protected MultiPolygonBuilder createMutation(MultiPolygonBuilder original) throws IOException { + return mutate(original); + } + + static MultiPolygonBuilder mutate(MultiPolygonBuilder original) throws IOException { + MultiPolygonBuilder mutation; + if (randomBoolean()) { + mutation = new MultiPolygonBuilder(original.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT); + for (PolygonBuilder pb : original.polygons()) { + mutation.polygon((PolygonBuilder) copyShape(pb)); + } + } else { + mutation = (MultiPolygonBuilder) copyShape(original); + if (mutation.polygons().size() > 0) { + int polyToChange = randomInt(mutation.polygons().size() - 1); + mutation.polygons().set(polyToChange, PolygonBuilderTests.mutatePolygonBuilder(mutation.polygons().get(polyToChange))); + } else { + mutation.polygon((PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON)); + } + } + return mutation; + } + + static MultiPolygonBuilder createRandomShape() { + MultiPolygonBuilder mpb = new MultiPolygonBuilder(randomFrom(Orientation.values())); + int polys = randomIntBetween(0, 10); + for (int i = 0; i < polys; i++) { + PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); + mpb.polygon(pgb); + } + return mpb; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java index 
1e94a1bab3..1946d24581 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java @@ -24,15 +24,27 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; +import java.io.IOException; + public class PointBuilderTests extends AbstractShapeBuilderTestCase<PointBuilder> { @Override protected PointBuilder createTestShapeBuilder() { - return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT); + return createRandomShape(); } @Override - protected PointBuilder mutate(PointBuilder original) { - return new PointBuilder().coordinate(new Coordinate(original.longitude()/2, original.latitude()/2)); + protected PointBuilder createMutation(PointBuilder original) throws IOException { + return mutate(original); } + + static PointBuilder mutate(PointBuilder original) { + return new PointBuilder().coordinate(new Coordinate(original.longitude() / 2, original.latitude() / 2)); + } + + static PointBuilder createRandomShape() { + return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT); + } + + } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java new file mode 100644 index 0000000000..ad8b3b817f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class PolygonBuilderTests extends AbstractShapeBuilderTestCase<PolygonBuilder> { + + @Override + protected PolygonBuilder createTestShapeBuilder() { + return createRandomShape(); + } + + @Override + protected PolygonBuilder createMutation(PolygonBuilder original) throws IOException { + return mutate(original); + } + + static PolygonBuilder mutate(PolygonBuilder original) throws IOException { + PolygonBuilder mutation = (PolygonBuilder) copyShape(original); + return mutatePolygonBuilder(mutation); + } + + static PolygonBuilder mutatePolygonBuilder(PolygonBuilder pb) { + if (randomBoolean()) { + pb = polyWithOposingOrientation(pb); + } else { + // change either point in shell or in random hole + LineStringBuilder lineToChange; + if (randomBoolean() || pb.holes().size() == 0) { + lineToChange = pb.shell(); + } else { + lineToChange = randomFrom(pb.holes()); + } + Coordinate coordinate = randomFrom(lineToChange.coordinates(false)); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = 
randomDoubleBetween(-90.0, 90.0, true); + } + } + } + return pb; + } + + /** + * Takes an input polygon and returns an identical one, only with opposing orientation setting. + * This is done so we don't have to expose a setter for orientation in the actual class + */ + private static PolygonBuilder polyWithOposingOrientation(PolygonBuilder pb) { + PolygonBuilder mutation = new PolygonBuilder(pb.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT); + mutation.points(pb.shell().coordinates(false)); + for (LineStringBuilder hole : pb.holes()) { + mutation.hole(hole); + } + return mutation; + } + + static PolygonBuilder createRandomShape() { + PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON); + if (randomBoolean()) { + pgb = polyWithOposingOrientation(pgb); + } + return pgb; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java index 7901694bd4..7d696b0cd8 100644 --- a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java @@ -60,6 +60,24 @@ public abstract class ModuleTestCase extends ESTestCase { fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s); } + /** Configures the module and asserts "clazz" is not bound to anything. 
*/ + public void assertNotBound(Module module, Class clazz) { + List<Element> elements = Elements.getElements(module); + for (Element element : elements) { + if (element instanceof LinkedKeyBinding) { + LinkedKeyBinding binding = (LinkedKeyBinding) element; + if (clazz.equals(binding.getKey().getTypeLiteral().getType())) { + fail("Found binding for " + clazz.getName() + " to " + binding.getKey().getTypeLiteral().getType().getTypeName()); + } + } else if (element instanceof UntargettedBinding) { + UntargettedBinding binding = (UntargettedBinding) element; + if (clazz.equals(binding.getKey().getTypeLiteral().getType())) { + fail("Found binding for " + clazz.getName()); + } + } + } + } + /** * Attempts to configure the module, and asserts an {@link IllegalArgumentException} is * caught, containing the given messages diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index fcd2f7d876..17345fd714 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -17,20 +17,14 @@ * under the License. 
*/ package org.elasticsearch.common.lucene; + import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.Field.Store; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.NoDeletionPolicy; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.TermQuery; @@ -41,11 +35,7 @@ import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java index f4f3034528..7ee238ae7f 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java @@ -220,6 +220,41 @@ public class SimpleAllTests extends ESTestCase { indexWriter.close(); } + public void testTermMissingFromOneSegment() throws Exception { + Directory dir = new RAMDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + + Document doc = new Document(); + doc.add(new Field("_id", "1", StoredField.TYPE)); + 
AllEntries allEntries = new AllEntries(); + allEntries.addText("field", "something", 2.0f); + allEntries.reset(); + doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER))); + + indexWriter.addDocument(doc); + indexWriter.commit(); + + doc = new Document(); + doc.add(new Field("_id", "2", StoredField.TYPE)); + allEntries = new AllEntries(); + allEntries.addText("field", "else", 1.0f); + allEntries.reset(); + doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER))); + + indexWriter.addDocument(doc); + + IndexReader reader = DirectoryReader.open(indexWriter, true); + assertEquals(2, reader.leaves().size()); + IndexSearcher searcher = new IndexSearcher(reader); + + // "something" only appears in the first segment: + Query query = new AllTermQuery(new Term("_all", "something")); + TopDocs docs = searcher.search(query, 10); + assertEquals(1, docs.totalHits); + + indexWriter.close(); + } + public void testMultipleTokensAllNoBoost() throws Exception { Directory dir = new RAMDirectory(); IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 96715a05b3..ad811a38ae 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -24,13 +24,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import 
org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -42,14 +36,7 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -201,7 +188,7 @@ public class FreqTermsEnumTests extends ESTestCase { for (int i = 0; i < cycles; i++) { List<String> terms = new ArrayList<>(Arrays.asList(this.terms)); - Collections.shuffle(terms, getRandom()); + Collections.shuffle(terms, random()); for (String term : terms) { if (!termsEnum.seekExact(new BytesRef(term))) { assertThat("term : " + term, reference.get(term).docFreq, is(0)); diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java new file mode 100644 index 0000000000..798e82a979 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.network; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Table; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.http.HttpServerAdapter; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpStats; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +public class NetworkModuleTests extends ModuleTestCase { + + static class FakeTransportService extends TransportService { + public FakeTransportService() { + super(null, null); + } + } + + static class FakeTransport extends AssertingLocalTransport { + public FakeTransport() { + super(null, null, null, null); + } + } + + static class FakeHttpTransport extends AbstractLifecycleComponent<HttpServerTransport> implements HttpServerTransport { + public FakeHttpTransport() { + 
super(null); + } + @Override + protected void doStart() {} + @Override + protected void doStop() {} + @Override + protected void doClose() {} + @Override + public BoundTransportAddress boundAddress() { + return null; + } + @Override + public HttpInfo info() { + return null; + } + @Override + public HttpStats stats() { + return null; + } + @Override + public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {} + } + + static class FakeRestHandler extends BaseRestHandler { + public FakeRestHandler() { + super(null, null, null); + } + @Override + protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {} + } + + static class FakeCatRestHandler extends AbstractCatAction { + public FakeCatRestHandler() { + super(null, null, null); + } + @Override + protected void doRequest(RestRequest request, RestChannel channel, Client client) {} + @Override + protected void documentation(StringBuilder sb) {} + @Override + protected Table getTableWithHeader(RestRequest request) { + return null; + } + } + + public void testRegisterTransportService() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerTransportService("custom", FakeTransportService.class); + assertBinding(module, TransportService.class, FakeTransportService.class); + + // check it works with transport only as well + module = new NetworkModule(new NetworkService(settings), settings, true); + module.registerTransportService("custom", FakeTransportService.class); + assertBinding(module, TransportService.class, FakeTransportService.class); + } + + public void testRegisterTransport() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerTransport("custom", 
FakeTransport.class); + assertBinding(module, Transport.class, FakeTransport.class); + + // check it works with transport only as well + module = new NetworkModule(new NetworkService(settings), settings, true); + module.registerTransport("custom", FakeTransport.class); + assertBinding(module, Transport.class, FakeTransport.class); + } + + public void testRegisterHttpTransport() { + Settings settings = Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerHttpTransport("custom", FakeHttpTransport.class); + assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class); + + // check registration not allowed for transport only + module = new NetworkModule(new NetworkService(settings), settings, true); + try { + module.registerHttpTransport("custom", FakeHttpTransport.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Cannot register http transport")); + assertTrue(e.getMessage().contains("for transport client")); + } + + // not added if http is disabled + settings = Settings.builder().put(NetworkModule.HTTP_ENABLED, false).build(); + module = new NetworkModule(new NetworkService(settings), settings, false); + assertNotBound(module, HttpServerTransport.class); + } + + public void testRegisterRestHandler() { + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerRestHandler(FakeRestHandler.class); + // also check a builtin is bound + assertSetMultiBinding(module, RestHandler.class, FakeRestHandler.class, RestMainAction.class); + + // check registration not allowed for transport only + module = new NetworkModule(new NetworkService(settings), settings, true); + try { + module.registerRestHandler(FakeRestHandler.class); + fail(); + } catch (IllegalArgumentException e) { + 
assertTrue(e.getMessage().contains("Cannot register rest handler")); + assertTrue(e.getMessage().contains("for transport client")); + } + } + + public void testRegisterCatRestHandler() { + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerRestHandler(FakeCatRestHandler.class); + // also check a builtin is bound + assertSetMultiBinding(module, AbstractCatAction.class, FakeCatRestHandler.class, RestNodesAction.class); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java new file mode 100644 index 0000000000..97393c51b8 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class ScopedSettingsTests extends ESTestCase { + + public void testAddConsumer() { + Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); + Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, Collections.singleton(testSetting)); + + AtomicInteger consumer = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting, consumer::set); + AtomicInteger consumer2 = new AtomicInteger(); + try { + service.addSettingsUpdateConsumer(testSetting2, consumer2::set); + fail("setting not registered"); + } catch (IllegalArgumentException ex) { + assertEquals("Setting is not registered for key [foo.bar.baz]", ex.getMessage()); + } + + try { + service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {consumer.set(a); consumer2.set(b);}); + fail("setting not registered"); + } catch (IllegalArgumentException ex) { + assertEquals("Setting is not registered for key [foo.bar.baz]", ex.getMessage()); + } + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(2, consumer.get()); + assertEquals(0, consumer2.get()); + } + + public void testApply() { + Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, true, 
Setting.Scope.CLUSTER); + Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER); + AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2))); + + AtomicInteger consumer = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting, consumer::set); + AtomicInteger consumer2 = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting2, consumer2::set, (s) -> assertTrue(s > 0)); + + AtomicInteger aC = new AtomicInteger(); + AtomicInteger bC = new AtomicInteger(); + service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {aC.set(a); bC.set(b);}); + + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + try { + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", -15).build()); + fail("invalid value"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [foo.bar.baz] from [1] to [-15]", ex.getMessage()); + } + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + try { + service.dryRun(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", -15).build()); + fail("invalid value"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [foo.bar.baz] from [1] to [-15]", ex.getMessage()); + } + + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + service.dryRun(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(2, consumer.get()); + assertEquals(15, 
consumer2.get()); + assertEquals(2, aC.get()); + assertEquals(15, bC.get()); + } + + public void testGet() { + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + // group setting - complex matcher + Setting setting = settings.get("cluster.routing.allocation.require.value"); + assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING); + + setting = settings.get("cluster.routing.allocation.total_shards_per_node"); + assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING); + + // array settings - complex matcher + assertNotNull(settings.get("transport.tracer.include." + randomIntBetween(1, 100))); + assertSame(TransportService.TRACE_LOG_INCLUDE_SETTING, settings.get("transport.tracer.include." + randomIntBetween(1, 100))); + + // array settings - complex matcher - only accepts numbers + assertNull(settings.get("transport.tracer.include.FOO")); + } + + public void testIsDynamic(){ + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER)))); + assertFalse(settings.hasDynamicSetting("foo.bar.baz")); + assertTrue(settings.hasDynamicSetting("foo.bar")); + assertNotNull(settings.get("foo.bar.baz")); + settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + assertTrue(settings.hasDynamicSetting("transport.tracer.include." 
+ randomIntBetween(1, 100))); + assertFalse(settings.hasDynamicSetting("transport.tracer.include.BOOM")); + assertTrue(settings.hasDynamicSetting("cluster.routing.allocation.require.value")); + } + + public void testDiff() throws IOException { + Setting<Integer> foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER); + Setting<Integer> foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER); + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz))); + Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY); + assertEquals(diff.getAsMap().size(), 1); + assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1)); + + diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.builder().put("foo.bar.baz", 17).build()); + assertEquals(diff.getAsMap().size(), 1); + assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(17)); + } + + public void testUpdateTracer() { + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AtomicReference<List<String>> ref = new AtomicReference<>(); + settings.addSettingsUpdateConsumer(TransportService.TRACE_LOG_INCLUDE_SETTING, ref::set); + settings.applySettings(Settings.builder().putArray("transport.tracer.include", "internal:index/shard/recovery/*", "internal:gateway/local*").build()); + assertNotNull(ref.get().size()); + assertEquals(ref.get().size(), 2); + assertTrue(ref.get().contains("internal:index/shard/recovery/*")); + assertTrue(ref.get().contains("internal:gateway/local*")); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java new file mode 100644 index 0000000000..069418a7e1 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -0,0 +1,323 @@ +/* + * Licensed 
to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +public class SettingTests extends ESTestCase { + + + public void testGet() { + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + assertFalse(booleanSetting.get(Settings.EMPTY)); + assertFalse(booleanSetting.get(Settings.builder().put("foo.bar", false).build())); + assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); + } + + public void testByteSize() { + Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.CLUSTER); + assertFalse(byteSizeValueSetting.isGroupSetting()); + ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertEquals(byteSizeValue.bytes(), 1024); + 
AtomicReference<ByteSizeValue> value = new AtomicReference<>(null); + ClusterSettings.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger); + try { + settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY); + fail("no unit"); + } catch (IllegalArgumentException ex) { + assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized", ex.getMessage()); + } + + assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY)); + assertEquals(new ByteSizeValue(12), value.get()); + } + + public void testSimpleUpdate() { + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null); + ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger); + Settings build = Settings.builder().put("foo.bar", false).build(); + settingUpdater.apply(build, Settings.EMPTY); + assertNull(atomicBoolean.get()); + build = Settings.builder().put("foo.bar", true).build(); + settingUpdater.apply(build, Settings.EMPTY); + assertTrue(atomicBoolean.get()); + + // try update bogus value + build = Settings.builder().put("foo.bar", "I am not a boolean").build(); + try { + settingUpdater.apply(build, Settings.EMPTY); + fail("not a boolean"); + } catch (IllegalArgumentException ex) { + assertEquals("Failed to parse value [I am not a boolean] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]", ex.getMessage()); + } + } + + public void testUpdateNotDynamic() { + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.CLUSTER); + assertFalse(booleanSetting.isGroupSetting()); + AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null); + try { + booleanSetting.newUpdater(atomicBoolean::set, logger); + fail("not dynamic"); + } catch 
(IllegalStateException ex) { + assertEquals("setting [foo.bar] is not dynamic", ex.getMessage()); + } + } + + public void testUpdaterIsIsolated() { + Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER); + AtomicReference<Boolean> ab1 = new AtomicReference<>(null); + AtomicReference<Boolean> ab2 = new AtomicReference<>(null); + ClusterSettings.SettingUpdater settingUpdater = booleanSetting.newUpdater(ab1::set, logger); + ClusterSettings.SettingUpdater settingUpdater2 = booleanSetting.newUpdater(ab2::set, logger); + settingUpdater.apply(Settings.builder().put("foo.bar", true).build(), Settings.EMPTY); + assertTrue(ab1.get()); + assertNull(ab2.get()); + } + + public void testDefault() { + TimeValue defautlValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000)); + Setting<TimeValue> setting = Setting.positiveTimeSetting("my.time.value", defautlValue, randomBoolean(), Setting.Scope.CLUSTER); + assertFalse(setting.isGroupSetting()); + String aDefault = setting.getDefault(Settings.EMPTY); + assertEquals(defautlValue.millis() + "ms", aDefault); + assertEquals(defautlValue.millis(), setting.get(Settings.EMPTY).millis()); + + Setting<String> secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.CLUSTER); + assertEquals("some_default", secondaryDefault.get(Settings.EMPTY)); + assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build())); + } + + public void testComplexType() { + AtomicReference<ComplexType> ref = new AtomicReference<>(null); + Setting<ComplexType> setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.CLUSTER); + assertFalse(setting.isGroupSetting()); + ref.set(setting.get(Settings.EMPTY)); + ComplexType type = ref.get(); + ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger); + assertFalse(settingUpdater.apply(Settings.EMPTY, 
Settings.EMPTY)); + assertSame("no update - type has not changed", type, ref.get()); + + // change from default + assertTrue(settingUpdater.apply(Settings.builder().put("foo.bar", "2").build(), Settings.EMPTY)); + assertNotSame("update - type has changed", type, ref.get()); + assertEquals("2", ref.get().foo); + + + // change back to default... + assertTrue(settingUpdater.apply(Settings.EMPTY, Settings.builder().put("foo.bar", "2").build())); + assertNotSame("update - type has changed", type, ref.get()); + assertEquals("", ref.get().foo); + } + + public void testType() { + Setting<Integer> integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER); + assertEquals(integerSetting.getScope(), Setting.Scope.CLUSTER); + integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.INDEX); + assertEquals(integerSetting.getScope(), Setting.Scope.INDEX); + } + + public void testGroups() { + AtomicReference<Settings> ref = new AtomicReference<>(null); + Setting<Settings> setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.CLUSTER); + assertTrue(setting.isGroupSetting()); + ClusterSettings.SettingUpdater settingUpdater = setting.newUpdater(ref::set, logger); + + Settings currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build(); + Settings previousInput = Settings.EMPTY; + assertTrue(settingUpdater.apply(currentInput, previousInput)); + assertNotNull(ref.get()); + Settings settings = ref.get(); + Map<String, Settings> asMap = settings.getAsGroups(); + assertEquals(3, asMap.size()); + assertEquals(asMap.get("1").get("value"), "1"); + assertEquals(asMap.get("2").get("value"), "2"); + assertEquals(asMap.get("3").get("value"), "3"); + + previousInput = currentInput; + currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build(); + Settings current = ref.get(); + 
assertFalse(settingUpdater.apply(currentInput, previousInput)); + assertSame(current, ref.get()); + + previousInput = currentInput; + currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build(); + // now update and check that we got it + assertTrue(settingUpdater.apply(currentInput, previousInput)); + assertNotSame(current, ref.get()); + + asMap = ref.get().getAsGroups(); + assertEquals(2, asMap.size()); + assertEquals(asMap.get("1").get("value"), "1"); + assertEquals(asMap.get("2").get("value"), "2"); + + previousInput = currentInput; + currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "4").build(); + // now update and check that we got it + assertTrue(settingUpdater.apply(currentInput, previousInput)); + assertNotSame(current, ref.get()); + + asMap = ref.get().getAsGroups(); + assertEquals(2, asMap.size()); + assertEquals(asMap.get("1").get("value"), "1"); + assertEquals(asMap.get("2").get("value"), "4"); + + assertTrue(setting.match("foo.bar.baz")); + assertFalse(setting.match("foo.baz.bar")); + + ClusterSettings.SettingUpdater predicateSettingUpdater = setting.newUpdater(ref::set, logger,(s) -> assertFalse(true)); + try { + predicateSettingUpdater.apply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build(), Settings.EMPTY); + fail("not accepted"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "illegal value can't update [foo.bar.] 
from [{}] to [{1.value=1, 2.value=2}]"); + } + } + + public static class ComplexType { + + final String foo; + + public ComplexType(String foo) { + this.foo = foo; + } + } + + public static class Composite { + + private Integer b; + private Integer a; + + public void set(Integer a, Integer b) { + this.a = a; + this.b = b; + } + } + + + public void testComposite() { + Composite c = new Composite(); + Setting<Integer> a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.CLUSTER); + Setting<Integer> b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.CLUSTER); + ClusterSettings.SettingUpdater<Tuple<Integer, Integer>> settingUpdater = Setting.compoundUpdater(c::set, a, b, logger); + assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY)); + assertNull(c.a); + assertNull(c.b); + + Settings build = Settings.builder().put("foo.int.bar.a", 2).build(); + assertTrue(settingUpdater.apply(build, Settings.EMPTY)); + assertEquals(2, c.a.intValue()); + assertEquals(1, c.b.intValue()); + + Integer aValue = c.a; + assertFalse(settingUpdater.apply(build, build)); + assertSame(aValue, c.a); + Settings previous = build; + build = Settings.builder().put("foo.int.bar.a", 2).put("foo.int.bar.b", 5).build(); + assertTrue(settingUpdater.apply(build, previous)); + assertEquals(2, c.a.intValue()); + assertEquals(5, c.b.intValue()); + + // reset to default + assertTrue(settingUpdater.apply(Settings.EMPTY, build)); + assertEquals(1, c.a.intValue()); + assertEquals(1, c.b.intValue()); + + } + + public void testListSettings() { + Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + List<String> value = listSetting.get(Settings.EMPTY); + assertEquals(1, value.size()); + assertEquals("foo,bar", value.get(0)); + + List<String> input = Arrays.asList("test", "test1, test2", "test", ",,,,"); + Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new 
String[0])); + value = listSetting.get(builder.build()); + assertEquals(input.size(), value.size()); + assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0])); + + // try to parse this really annoying format + builder = Settings.builder(); + for (int i = 0; i < input.size(); i++) { + builder.put("foo.bar." + i, input.get(i)); + } + value = listSetting.get(builder.build()); + assertEquals(input.size(), value.size()); + assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0])); + + AtomicReference<List<String>> ref = new AtomicReference<>(); + AbstractScopedSettings.SettingUpdater settingUpdater = listSetting.newUpdater(ref::set, logger); + assertTrue(settingUpdater.hasChanged(builder.build(), Settings.EMPTY)); + settingUpdater.apply(builder.build(), Settings.EMPTY); + assertEquals(input.size(), ref.get().size()); + assertArrayEquals(ref.get().toArray(new String[0]), input.toArray(new String[0])); + + settingUpdater.apply(Settings.builder().putArray("foo.bar", "123").build(), builder.build()); + assertEquals(1, ref.get().size()); + assertArrayEquals(ref.get().toArray(new String[0]), new String[] {"123"}); + + settingUpdater.apply(Settings.builder().put("foo.bar", "1,2,3").build(), Settings.builder().putArray("foo.bar", "123").build()); + assertEquals(3, ref.get().size()); + assertArrayEquals(ref.get().toArray(new String[0]), new String[] {"1", "2", "3"}); + + settingUpdater.apply(Settings.EMPTY, Settings.builder().put("foo.bar", "1,2,3").build()); + assertEquals(1, ref.get().size()); + assertEquals("foo,bar", ref.get().get(0)); + + Setting<List<Integer>> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, true, Setting.Scope.CLUSTER); + List<Integer> defaultValue = otherSettings.get(Settings.EMPTY); + assertEquals(0, defaultValue.size()); + List<Integer> intValues = otherSettings.get(Settings.builder().put("foo.bar", "0,1,2,3").build()); + assertEquals(4, intValues.size()); + for (int i 
= 0; i < intValues.size(); i++) { + assertEquals(i, intValues.get(i).intValue()); + } + } + + public void testListSettingAcceptsNumberSyntax() { + Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER); + List<String> input = Arrays.asList("test", "test1, test2", "test", ",,,,"); + Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0])); + // try to parse this really annoying format + for (String key : builder.internalMap().keySet()) { + assertTrue("key: " + key + " doesn't match", listSetting.match(key)); + } + builder = Settings.builder().put("foo.bar", "1,2,3"); + for (String key : builder.internalMap().keySet()) { + assertTrue("key: " + key + " doesn't match", listSetting.match(key)); + } + assertFalse(listSetting.match("foo_bar")); + assertFalse(listSetting.match("foo_bar.1")); + assertTrue(listSetting.match("foo.bar")); + assertTrue(listSetting.match("foo.bar." 
+ randomIntBetween(0,10000))); + + } +} diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index ec0e26091d..2945d86fe5 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -162,4 +162,14 @@ public class TimeValueTests extends ESTestCase { assertThat(e.getMessage(), containsString("Failed to parse")); } } + + public void testToStringRep() { + assertEquals("-1", new TimeValue(-1).getStringRep()); + assertEquals("10ms", new TimeValue(10, TimeUnit.MILLISECONDS).getStringRep()); + assertEquals("1533ms", new TimeValue(1533, TimeUnit.MILLISECONDS).getStringRep()); + assertEquals("90s", new TimeValue(90, TimeUnit.SECONDS).getStringRep()); + assertEquals("90m", new TimeValue(90, TimeUnit.MINUTES).getStringRep()); + assertEquals("36h", new TimeValue(36, TimeUnit.HOURS).getStringRep()); + assertEquals("1000d", new TimeValue(1000, TimeUnit.DAYS).getStringRep()); + } } diff --git a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 7d36c09ee1..184de7f385 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; 
-import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Before; @@ -336,9 +336,9 @@ public class BigArraysTests extends ESSingleNodeTestCase { for (String type : Arrays.asList("Byte", "Int", "Long", "Float", "Double", "Object")) { HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, size - 1, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), size - 1, ByteSizeUnit.BYTES) .build(), - new NodeSettingsService(Settings.EMPTY)); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); try { @@ -356,9 +356,9 @@ public class BigArraysTests extends ESSingleNodeTestCase { final long maxSize = randomIntBetween(1 << 10, 1 << 22); HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, maxSize, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) .build(), - new NodeSettingsService(Settings.EMPTY)); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); final int size = scaledRandomIntBetween(1, 20); diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index fe9ba6b1fc..4c3612da8e 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -25,14 +25,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +73,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(tmpList, getRandom()); + Collections.shuffle(tmpList, random()); for (BytesRef ref : tmpList) { array.append(ref); } @@ -111,7 +104,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(values, getRandom()); + Collections.shuffle(values, random()); } int[] indices = new int[array.size()]; for (int i = 0; i < indices.length; i++) { diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 1d2d214116..deac15b50d 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -27,13 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import 
java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -46,7 +40,7 @@ public class PrioritizedExecutorsTests extends ESTestCase { public void testPriorityQueue() throws Exception { PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>(); List<Priority> priorities = Arrays.asList(Priority.values()); - Collections.shuffle(priorities); + Collections.shuffle(priorities, random()); for (Priority priority : priorities) { queue.add(priority); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index 7ffafc004a..9129e3c05b 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.xcontent.builder; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,6 +38,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; +import java.util.Collections; import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; @@ -51,9 +51,6 @@ import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConvers import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class XContentBuilderTests extends 
ESTestCase { public void testPrettyWithLfAtEnd() throws Exception { ByteArrayOutputStream os = new ByteArrayOutputStream(); @@ -350,4 +347,33 @@ public class XContentBuilderTests extends ESTestCase { "}", string.trim()); } + public void testWriteMapWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.map(Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } + + public void testWriteMapValueWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.value(Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } + + public void testWriteFieldMapWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.field("map", Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b14792a2c3..b9d4fdec91 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -132,7 +132,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly 
.put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("http.enabled", false) // just to make test quicker .put("gateway.local.list_timeout", "10s") // still long to induce failures but to long so test won't time out .build(); @@ -150,7 +150,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // TODO: Rarely use default settings form some of these Settings nodeSettings = Settings.builder() .put(DEFAULT_SETTINGS) - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, minimumMasterNode) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); if (discoveryConfig == null) { @@ -217,7 +217,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.info("--> reducing min master nodes to 2"); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2)).get()); + .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)).get()); String master = internalCluster().getMasterName(); String nonMaster = null; @@ -293,9 +293,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Wait until the master node sees al 3 nodes again. 
ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkPartition.expectedTimeToHeal().millis())); - logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK, "all"); + logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all"); client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK, "all")) + .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all")) .get(); networkPartition.startDisrupting(); @@ -473,7 +473,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { docsPerIndexer = 1 + randomInt(5); logger.info("indexing " + docsPerIndexer + " docs per indexer during partition"); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); - Collections.shuffle(semaphores); + Collections.shuffle(semaphores, random()); for (Semaphore semaphore : semaphores) { assertThat(semaphore.availablePermits(), equalTo(0)); semaphore.release(docsPerIndexer); @@ -683,7 +683,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen("test"); nodes = new ArrayList<>(nodes); - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); String isolatedNode = nodes.get(0); String notIsolatedNode = nodes.get(1); @@ -863,7 +863,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { internalCluster().startNodesAsync(3, Settings.builder() .put(DiscoveryService.SETTING_INITIAL_STATE_TIMEOUT, "1ms") - .put(DiscoverySettings.PUBLISH_TIMEOUT, "3s") + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s") .build()).get(); logger.info("applying disruption while cluster is forming ..."); @@ -1038,7 +1038,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { new NetworkDisconnectPartition(getRandom()), new 
SlowClusterStateProcessing(getRandom()) ); - Collections.shuffle(list); + Collections.shuffle(list, random()); setDisruptionScheme(list.get(0)); return list.get(0); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index e7bded344d..c495556190 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -26,11 +26,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; public class ElectMasterServiceTests extends ESTestCase { @@ -53,7 +49,7 @@ public class ElectMasterServiceTests extends ESTestCase { nodes.add(node); } - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); return nodes; } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index ea590756a8..4a248784f9 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; 
import org.elasticsearch.common.transport.LocalTransportAddress; @@ -38,7 +39,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.membership.MembershipAction; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -67,7 +67,7 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master clusterService.setState(ClusterState.builder(clusterService.state()).nodes(DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), - new DiscoverySettings(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)), Settings.EMPTY); + new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); } public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { @@ -244,7 +244,7 @@ public class NodeJoinControllerTests extends ESTestCase { // add - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] unique master nodes. 
Total of [{}] join requests", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -269,7 +269,7 @@ public class NodeJoinControllerTests extends ESTestCase { } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", finalJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -316,7 +316,7 @@ public class NodeJoinControllerTests extends ESTestCase { nodesToJoin.add(node); } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 0b5f9997db..217e86526c 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -24,10 +24,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; @@ -45,6 +43,7 @@ import 
org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.TestCustomMetaData; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; @@ -57,9 +56,7 @@ import org.hamcrest.Matchers; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; @@ -84,7 +81,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true)); client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, false)) + .setTransientSettings(Settings.builder().put(ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING.getKey(), false)) .get(); assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false)); @@ -228,16 +225,69 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master than the current one, rejecting")); } + public void testHandleNodeJoin_incompatibleClusterState() throws UnknownHostException { + Settings nodeSettings = Settings.settingsBuilder() + .put("discovery.type", "zen") // <-- To override the local setting if set externally + .build(); + String masterOnlyNode = internalCluster().startMasterOnlyNode(nodeSettings); + String node1 = internalCluster().startNode(nodeSettings); + ZenDiscovery zenDiscovery = (ZenDiscovery) 
internalCluster().getInstance(Discovery.class, masterOnlyNode); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1); + final ClusterState state = clusterService.state(); + MetaData.Builder mdBuilder = MetaData.builder(state.metaData()); + mdBuilder.putCustom(CustomMetaData.TYPE, new CustomMetaData("data")); + ClusterState stateWithCustomMetaData = ClusterState.builder(state).metaData(mdBuilder).build(); + + final AtomicReference<IllegalStateException> holder = new AtomicReference<>(); + DiscoveryNode node = state.nodes().localNode(); + zenDiscovery.handleJoinRequest(node, stateWithCustomMetaData, new MembershipAction.JoinCallback() { + @Override + public void onSuccess() { + } + + @Override + public void onFailure(Throwable t) { + holder.set((IllegalStateException) t); + } + }); + + assertThat(holder.get(), notNullValue()); + assertThat(holder.get().getMessage(), equalTo("failure when sending a validation request to node")); + } + + public static class CustomMetaData extends TestCustomMetaData { + public static final String TYPE = "custom_md"; + + CustomMetaData(String data) { + super(data); + } + + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new CustomMetaData(data); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public EnumSet<MetaData.XContentContext> context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); + } + } + public void testHandleNodeJoin_incompatibleMinVersion() throws UnknownHostException { Settings nodeSettings = Settings.settingsBuilder() .put("discovery.type", "zen") // <-- To override the local setting if set externally .build(); String nodeName = internalCluster().startNode(nodeSettings, Version.V_2_0_0_beta1); ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); - + ClusterService clusterService = 
internalCluster().getInstance(ClusterService.class, nodeName); DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), Version.V_1_6_0); final AtomicReference<IllegalStateException> holder = new AtomicReference<>(); - zenDiscovery.handleJoinRequest(node, new MembershipAction.JoinCallback() { + zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() { @Override public void onSuccess() { } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java index 82733d9220..c54489bceb 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java @@ -62,7 +62,7 @@ public class ZenPingTests extends ESTestCase { } // shuffle - Collections.shuffle(pings); + Collections.shuffle(pings, random()); ZenPing.PingCollection collection = new ZenPing.PingCollection(); collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()])); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index d33aadd84a..0bac1bc4c2 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -21,17 +21,15 @@ package org.elasticsearch.discovery.zen.publish; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.Diff; +import 
org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -42,40 +40,24 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; import org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.emptyIterable; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; @TestLogging("discovery.zen.publish:TRACE") public class PublishClusterStateActionTests extends ESTestCase { @@ -156,7 +138,7 @@ public class PublishClusterStateActionTests extends ESTestCase { public MockNode createMockNode(String name, Settings settings, Version version, @Nullable ClusterStateListener listener) throws Exception { settings = Settings.builder() .put("name", name) - .put(TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .put(settings) .build(); @@ -237,7 +219,7 @@ public class PublishClusterStateActionTests extends ESTestCase { protected MockPublishAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, DiscoveryNodesProvider nodesProvider, PublishClusterStateAction.NewPendingClusterStateListener listener) { - DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); return new MockPublishAction(settings, transportService, nodesProvider, listener, discoverySettings, ClusterName.DEFAULT); } @@ -345,7 +327,7 @@ public class PublishClusterStateActionTests extends ESTestCase { } public void testDisablingDiffPublishing() throws Exception { - Settings noDiffPublishingSettings = 
Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); + Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build(); MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new ClusterStateListener() { @Override @@ -384,7 +366,7 @@ public class PublishClusterStateActionTests extends ESTestCase { public void testSimultaneousClusterStatePublishing() throws Exception { int numberOfNodes = randomIntBetween(2, 10); int numberOfIterations = scaledRandomIntBetween(5, 50); - Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, randomBoolean()).build(); + Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), randomBoolean()).build(); MockNode master = createMockNode("node0", settings, Version.CURRENT, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { @@ -510,8 +492,8 @@ public class PublishClusterStateActionTests extends ESTestCase { final boolean expectingToCommit = randomBoolean(); Settings.Builder settings = Settings.builder(); // make sure we have a reasonable timeout if we expect to timeout, o.w. one that will make the test "hang" - settings.put(DiscoverySettings.COMMIT_TIMEOUT, expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h") - .put(DiscoverySettings.PUBLISH_TIMEOUT, "5ms"); // test is about committing + settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? 
"100ms" : "1h") + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing MockNode master = createMockNode("master", settings.build()); @@ -675,7 +657,7 @@ public class PublishClusterStateActionTests extends ESTestCase { logger.info("--> committing states"); - Collections.shuffle(states, random()); + Randomness.shuffle(states); for (ClusterState state : states) { node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel); assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); @@ -695,7 +677,7 @@ public class PublishClusterStateActionTests extends ESTestCase { */ public void testTimeoutOrCommit() throws Exception { Settings settings = Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT, "1ms").build(); // short but so we will sometime commit sometime timeout + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout MockNode master = createMockNode("master", settings); MockNode node = createMockNode("node", settings); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 15ddc9dd77..2c6a55da24 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -57,7 +57,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { //ridiculous settings to make sure we don't run into uninitialized because fo default AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), 
"always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) .build()); @@ -111,7 +111,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { //ridiculous settings to make sure we don't run into uninitialized because fo default AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) .build()); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d7f4c9176d..441314b1e3 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -19,12 +19,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.*; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -33,11 +28,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; -import 
org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -50,20 +41,10 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.stream.StreamSupport; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS public class MetaDataStateFormatTests extends ESTestCase { @@ -349,7 +330,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } List<Path> dirList = Arrays.asList(dirs); - Collections.shuffle(dirList, getRandom()); + Collections.shuffle(dirList, random()); MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0])); MetaData latestMetaData = meta.get(numStates-1); assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_"))); diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 73cbb51fae..193985a1c6 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ 
b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; -import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -59,25 +59,29 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { this.testAllocator = new TestAllocator(); } - /** - * Verifies that the canProcess method of primary allocation behaves correctly - * and processes only the applicable shard. - */ - public void testNoProcessReplica() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); - } - - public void testNoProcessPrimayNotAllcoatedBefore() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, true, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); + public void testNoProcessPrimaryNotAllocatedBefore() { + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomBoolean(), Version.CURRENT); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), true, Version.V_2_1_0); + } + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId)); } /** * Tests that when async fetch returns that there is no data, the shard will not be allocated. 
*/ public void testNoAsyncFetchData() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -85,11 +89,29 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. + * Tests when the node returns that no data was found for it (-1 for version and null for allocation id), + * it will be moved to ignore unassigned. */ public void testNoAllocationFound() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, -1); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0); + } + testAllocator.addData(node1, -1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Tests when the node returns data with a shard allocation id that does not match active allocation ids, it will be moved to ignore unassigned. 
+ */ + public void testNoMatchingAllocationIdFound() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "id2"); + testAllocator.addData(node1, 1, "id1"); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -97,11 +119,31 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. + * Tests that when there is a node to allocate the shard to, and there are no active allocation ids, it will be allocated to it. + * This is the case when we have old shards from pre-3.0 days. + */ + public void testNoActiveAllocationIds() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1); + testAllocator.addData(node1, 1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id())); + } + + /** + * Tests when the node returns that no data was found for it, it will be moved to ignore unassigned. 
*/ public void testStoreException() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 3, new CorruptIndexException("test", "test")); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1", new CorruptIndexException("test", "test")); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1); + testAllocator.addData(node1, 3, null, new CorruptIndexException("test", "test")); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -112,8 +154,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * Tests that when there is a node to allocate the shard to, it will be allocated to it. 
*/ public void testFoundAllocationAndAllocating() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_2_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -126,8 +174,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * it will be moved to ignore unassigned until it can be allocated to. */ public void testFoundAllocationButThrottlingDecider() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, Version.V_2_2_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -139,8 +193,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * force the allocation to it. 
*/ public void testFoundAllocationButNoDecider() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, Version.V_2_0_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -149,11 +209,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests that the highest version node is chosed for allocation. + * Tests that the highest version node is chosen for allocation. */ - public void testAllocateToTheHighestVersion() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 10).addData(node2, 12); + public void testAllocateToTheHighestVersionOnLegacyIndex() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0); + testAllocator.addData(node1, 10, null).addData(node2, 12, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -162,35 +222,150 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests that when restoring from snapshot, even if we didn't find any node to allocate on, the shard - * will remain in the unassigned list to be allocated later. 
+ * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation + * deciders say yes, we allocate to that node. + */ + public void testRestore() { + RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation + * deciders say throttle, we add it to ignored shards. + */ + public void testRestoreThrottle() { + RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); + } + + /** + * Tests that when restoring from a snapshot and we find a node with a shard copy but allocation + * deciders say no, we still allocate to that node. 
+ */ + public void testRestoreForcesAllocateIfShardAvailable() { + RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "some allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when restoring from a snapshot and we don't find a node with a shard copy, the shard will remain in + * the unassigned list to be allocated later. */ - public void testRestoreIgnoresNoNodesToAllocate() { + public void testRestoreDoesNotAssignIfNoShardAvailable() { + RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, -1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + } + + private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders) { + Version version = randomFrom(Version.CURRENT, Version.V_2_0_0); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)).numberOfShards(1).numberOfReplicas(0) + .putActiveAllocationIds(0, version == Version.CURRENT ? 
new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet())) + .build(); + RoutingTable routingTable = RoutingTable.builder() - .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) - .build(); + .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), version, shardId.getIndex())) + .build(); ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) - .metaData(metaData) - .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); - RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), state.getRoutingNodes(), state.nodes(), null, System.nanoTime()); + .metaData(metaData) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation + * deciders say yes, we allocate to that node. + */ + public void testRecoverOnAnyNode() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation + * deciders say throttle, we add it to ignored shards. 
+ */ + public void testRecoverOnAnyNodeThrottle() { + RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); + } - testAllocator.addData(node1, -1).addData(node2, -1); + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation + * deciders say no, we still allocate to that node. + */ + public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let + * BalancedShardAllocator assign the shard + */ + public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, -1, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + } + + private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders) { + Version version = randomFrom(Version.CURRENT, Version.V_2_0_0); + MetaData 
metaData = MetaData.builder() + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version) + .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) + .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)) + .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, version == Version.CURRENT ? new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet())) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) + .build(); + ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaData) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); } /** * Tests that only when enough copies of the shard exists we are going to allocate it. This test * verifies that with same version (1), and quorum allocation. 
*/ - public void testEnoughCopiesFoundForAllocation() { + public void testEnoughCopiesFoundForAllocationOnLegacyIndex() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2)) .build(); RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(shardId.getIndex())) @@ -207,7 +382,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node1, 1); + testAllocator.addData(node1, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); @@ -215,7 +390,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node2, 1); + testAllocator.addData(node2, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); @@ -229,9 +404,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * Tests that only when enough copies of the shard exists we are going to allocate it. 
This test * verifies that even with different version, we treat different versions as a copy, and count them. */ - public void testEnoughCopiesFoundForAllocationWithDifferentVersion() { + public void testEnoughCopiesFoundForAllocationOnLegacyIndexWithDifferentVersion() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2)) .build(); RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(shardId.getIndex())) @@ -248,7 +423,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node1, 1); + testAllocator.addData(node1, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); @@ -256,7 +431,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node2, 2); + testAllocator.addData(node2, 2, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); @@ -266,67 +441,20 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { 
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id())); } - public void testAllocationOnAnyNodeWithSharedFs() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, - ShardRoutingState.UNASSIGNED, 0, - new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - - Map<DiscoveryNode, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> data = new HashMap<>(); - data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1)); - data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 5)); - data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, -1)); - AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetches = - new AsyncShardFetch.FetchResult(shardId, data, new HashSet<>(), new HashSet<>()); - - PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, new HashSet<String>(), fetches); - assertThat(nAndV.allocationsFound, equalTo(2)); - assertThat(nAndV.highestVersion, equalTo(5L)); - assertThat(nAndV.nodes, contains(node2)); - - nAndV = testAllocator.buildNodesAndVersions(shard, true, new HashSet<String>(), fetches); - assertThat(nAndV.allocationsFound, equalTo(3)); - assertThat(nAndV.highestVersion, equalTo(5L)); - // All three nodes are potential candidates because shards can be recovered on any node - assertThat(nAndV.nodes, contains(node2, node1, node3)); - } - - public void testAllocationOnAnyNodeShouldPutNodesWithExceptionsLast() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, - ShardRoutingState.UNASSIGNED, 0, - new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - - Map<DiscoveryNode, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> data = new HashMap<>(); - 
data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1)); - data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 1)); - data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, 1, new IOException("I failed to open"))); - HashSet<String> ignoredNodes = new HashSet<>(); - ignoredNodes.add(node2.id()); - AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetches = - new AsyncShardFetch.FetchResult(shardId, data, new HashSet<>(), ignoredNodes); - - PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, ignoredNodes, fetches); - assertThat(nAndV.allocationsFound, equalTo(1)); - assertThat(nAndV.highestVersion, equalTo(1L)); - assertThat(nAndV.nodes, contains(node1)); - - nAndV = testAllocator.buildNodesAndVersions(shard, true, ignoredNodes, fetches); - assertThat(nAndV.allocationsFound, equalTo(2)); - assertThat(nAndV.highestVersion, equalTo(1L)); - // node3 should be last here - assertThat(nAndV.nodes.size(), equalTo(2)); - assertThat(nAndV.nodes, contains(node1, node3)); - } - - private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders) { + private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version, String... 
activeAllocationIds) { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsRecovery(metaData.index(shardId.getIndex())) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)) + .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, new HashSet<>(Arrays.asList(activeAllocationIds)))) + .build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + if (asNew) { + routingTableBuilder.addAsNew(metaData.index(shardId.getIndex())); + } else { + routingTableBuilder.addAsRecovery(metaData.index(shardId.getIndex())); + } ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) .metaData(metaData) - .routingTable(routingTable) + .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); } @@ -344,15 +472,15 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { return this; } - public TestAllocator addData(DiscoveryNode node, long version) { - return addData(node, version, null); + public TestAllocator addData(DiscoveryNode node, long version, String allocationId) { + return addData(node, version, allocationId, null); } - public TestAllocator addData(DiscoveryNode node, long version, @Nullable Throwable storeException) { + public TestAllocator addData(DiscoveryNode node, long version, String allocationId, @Nullable Throwable storeException) { if (data == null) { data = new HashMap<>(); } - data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, storeException)); + data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, allocationId, 
storeException)); return this; } diff --git a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index edde172047..a817b23949 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -20,10 +20,10 @@ package org.elasticsearch.gateway; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -32,14 +32,10 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; /** * @@ -51,72 +47,12 @@ public class QuorumGatewayIT extends ESIntegTestCase { return 2; } - public void testChangeInitialShardsRecovery() throws Exception { - logger.info("--> starting 3 nodes"); - final String[] nodes = 
internalCluster().startNodesAsync(3).get().toArray(new String[0]); - - createIndex("test"); - ensureGreen(); - NumShards test = getNumShards("test"); - - logger.info("--> indexing..."); - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - //We don't check for failures in the flush response: if we do we might get the following: - // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed] - flush(); - client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); - refresh(); - - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); - } - - final String nodeToRemove = nodes[between(0,2)]; - logger.info("--> restarting 1 nodes -- kill 2"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - return Settings.EMPTY; - } - - @Override - public boolean doRestart(String nodeName) { - return nodeToRemove.equals(nodeName); - } - }); - if (randomBoolean()) { - Thread.sleep(between(1, 400)); // wait a bit and give is a chance to try to allocate - } - ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet(); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet - assertTrue(awaitBusy(() -> { - ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get(); - return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null; - })); // wait until we get a cluster state - could be null if we quick enough. 
- final ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get(); - assertThat(clusterStateResponse.getState(), notNullValue()); - assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue()); - assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false)); - logger.info("--> change the recovery.initial_shards setting, and make sure its recovered"); - client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get(); - - logger.info("--> running cluster_health (wait for the shards to startup), primaries only since we only have 1 node"); - clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(test.numPrimaries)).actionGet(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); - - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); - } - } - public void testQuorumRecovery() throws Exception { logger.info("--> starting 3 nodes"); - internalCluster().startNodesAsync(3).get(); // we are shutting down nodes - make sure we don't have 2 clusters if we test network - setMinimumMasterNodes(2); + internalCluster().startNodesAsync(3, + Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()).get(); + createIndex("test"); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java index 2184fda47c..dbdf747de6 100644 --- 
a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java @@ -82,9 +82,9 @@ public class RecoveryBackwardsCompatibilityIT extends ESBackcompatTestCase { SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")).execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")).execute().actionGet(); backwardsCluster().upgradeAllNodes(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all")).execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")).execute().actionGet(); ensureGreen(); countResponse = client().prepareSearch().setSize(0).get(); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 01c76b465a..1a95b66817 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -360,7 +360,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + 
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); internalCluster().fullRestart(); @@ -369,7 +369,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { ensureGreen(); } else { logger.info("--> trying to sync flush"); - assertEquals(SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").failedShards(), 0); + assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); assertSyncIdsNotNull(); } @@ -377,7 +377,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); internalCluster().fullRestart(); diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index 9a053b3652..0818999ea7 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -43,9 +43,11 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; @@ -275,13 +277,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { } private RoutingAllocation 
onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)).numberOfShards(1).numberOfReplicas(0)) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(1).numberOfReplicas(1) + .putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId())))) + .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shardId.getIndex()) .addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) + .addShard(primaryShard) .addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(reason, null))) .build()) ) @@ -294,13 +299,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { } private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1) + .putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId())))) .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shardId.getIndex()) 
.addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) + .addShard(primaryShard) .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, 10, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null))) .build()) ) diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java new file mode 100644 index 0000000000..936a6fa09a --- /dev/null +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.indices.flush.SyncedFlushUtil; +import org.elasticsearch.indices.recovery.RecoveryState; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.elasticsearch.test.ESIntegTestCase.internalCluster; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; + +/** + * Test of file reuse on recovery shared between integration tests and backwards + * compatibility tests. + */ +public class ReusePeerRecoverySharedTest { + /** + * Test peer reuse on recovery. This is shared between RecoverFromGatewayIT + * and RecoveryBackwardsCompatibilityIT. + * + * @param indexSettings + * settings for the index to test + * @param restartCluster + * runnable that will restart the cluster under test + * @param logger + * logger for logging + * @param useSyncIds + * should this use synced flush? 
can't use synced from in the bwc + * tests + */ + public static void testCase(Settings indexSettings, Runnable restartCluster, ESLogger logger, boolean useSyncIds) { + /* + * prevent any rebalance actions during the peer recovery if we run into + * a relocation the reuse count will be 0 and this fails the test. We + * are testing here if we reuse the files on disk after full restarts + * for replicas. + */ + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put(indexSettings) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE))); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + logger.info("--> indexing docs"); + for (int i = 0; i < 1000; i++) { + client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); + if ((i % 200) == 0) { + client().admin().indices().prepareFlush().execute().actionGet(); + } + } + if (randomBoolean()) { + client().admin().indices().prepareFlush().execute().actionGet(); + } + logger.info("--> running cluster health"); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + // just wait for merges + client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get(); + client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get(); + + if (useSyncIds == false) { + logger.info("--> disabling allocation while the cluster is shut down"); + + // Disable allocations while we are closing nodes + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)).get(); + logger.info("--> full cluster restart"); + restartCluster.run(); + + logger.info("--> waiting for cluster to return to green after first shutdown"); + 
client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + } else { + logger.info("--> trying to sync flush"); + assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); + assertSyncIdsNotNull(); + } + + logger.info("--> disabling allocation while the cluster is shut down", useSyncIds ? "" : " a second time"); + // Disable allocations while we are closing nodes + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) + .get(); + logger.info("--> full cluster restart"); + restartCluster.run(); + + logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ? "" : "second "); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + + if (useSyncIds) { + assertSyncIdsNotNull(); + } + RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { + long recovered = 0; + for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) { + if (file.name().startsWith("segments")) { + recovered += file.length(); + } + } + if (!recoveryState.getPrimary() && (useSyncIds == false)) { + logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(), + recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); + assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered)); + assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0l)); + // we have to recover the segments file since we commit the translog ID on engine 
startup + assertThat("all bytes should be reused except of the segments file", recoveryState.getIndex().reusedBytes(), + equalTo(recoveryState.getIndex().totalBytes() - recovered)); + assertThat("no files should be recovered except of the segments file", recoveryState.getIndex().recoveredFileCount(), + equalTo(1)); + assertThat("all files should be reused except of the segments file", recoveryState.getIndex().reusedFileCount(), + equalTo(recoveryState.getIndex().totalFileCount() - 1)); + assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0)); + } else { + if (useSyncIds && !recoveryState.getPrimary()) { + logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}", + recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); + } + assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0l)); + assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes())); + assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0)); + assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount())); + } + } + } + + public static void assertSyncIdsNotNull() { + IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); + for (ShardStats shardStats : indexStats.getShards()) { + assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java index 21ecdf710b..457cf31ec8 100644 --- a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java +++ b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java @@ -56,7 +56,7 
@@ public class TransportIndexFailuresIT extends ESIntegTestCase { .put("discovery.type", "zen") // <-- To override the local setting if set externally .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // <-- for hitting simulated network failures quickly - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("discovery.zen.minimum_master_nodes", 1) .build(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index bfb7ead734..bbba3432b6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -49,21 +49,12 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; public class SimpleAllMapperTests extends ESSingleNodeTestCase { @@ -251,7 +242,7 @@ public class SimpleAllMapperTests extends 
ESSingleNodeTestCase { if (randomBoolean()) { booleanOptionList.add(new Tuple<>("store_term_vector_payloads", tv_payloads = randomBoolean())); } - Collections.shuffle(booleanOptionList, getRandom()); + Collections.shuffle(booleanOptionList, random()); for (Tuple<String, Boolean> option : booleanOptionList) { mappingBuilder.field(option.v1(), option.v2().booleanValue()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java index d94ae2b673..2fe0cf9f21 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -39,7 +38,6 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -321,9 +319,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase { DocumentMapper docMapperAfter = parser.parse(mappingAfter); - MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true, false); - - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapperBefore.merge(docMapperAfter.mapping(), true, false); docMapperBefore.merge(docMapperAfter.mapping(), false, false); diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java index b1224d5c6c..1cb41480cb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java @@ -19,15 +19,10 @@ package org.elasticsearch.index.mapper.core; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.CannedTokenStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.Token; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.*; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; @@ -64,13 +59,11 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), true, false); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword")); - mergeResult = stage1.merge(stage2.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), false, false); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard")); } @@ -85,7 +78,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { 
t2.setPositionIncrement(2); // Count funny tokens with more than one increment int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them Token[] tokens = new Token[] {t1, t2, t3}; - Collections.shuffle(Arrays.asList(tokens), getRandom()); + Collections.shuffle(Arrays.asList(tokens), random()); final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens); // TODO: we have no CannedAnalyzer? Analyzer analyzer = new Analyzer() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index fb67401e33..4772958bdb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -371,9 +371,8 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper); assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")); - MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false, false); + defaultMapper.merge(mergeMapper.mapping(), false, false); - assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false)); assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class))); DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index e5d08db8d9..6f7541a272 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.BinaryFieldMapper; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -82,7 +81,7 @@ public class ExternalMapper extends FieldMapper { private String mapperName; public Builder(String name, String generatedValue, String mapperName) { - super(name, new ExternalFieldType()); + super(name, new ExternalFieldType(), new ExternalFieldType()); this.builder = this; this.stringBuilder = stringField(name).store(false); this.generatedValue = generatedValue; @@ -96,9 +95,6 @@ public class ExternalMapper extends FieldMapper { @Override public ExternalMapper build(BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(ContentPath.Type.FULL); - context.path().add(name); BinaryFieldMapper binMapper = binBuilder.build(context); BooleanFieldMapper boolMapper = boolBuilder.build(context); @@ -108,7 +104,6 @@ public class ExternalMapper extends FieldMapper { FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); context.path().remove(); - context.path().pathType(origPathType); setupFieldType(context); return new ExternalMapper(name, fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, shapeMapper, stringMapper, @@ -219,7 +214,7 @@ public class ExternalMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // ignore this for now } diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java index dae8bc67fd..8bdb5670db 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -66,9 +65,9 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + public void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ExternalMetadataMapper)) { - mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this); + throw new IllegalArgumentException("Trying to merge " + mergeWith + " with " + this); } } @@ -99,7 +98,7 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder<Builder, ExternalMetadataMapper> { protected Builder() { - super(CONTENT_TYPE, FIELD_TYPE); + super(CONTENT_TYPE, FIELD_TYPE, FIELD_TYPE); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java index 4cf7b40521..7e519c3b72 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java @@ -87,7 +87,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { .startObject("f") .field("type", ExternalMapperPlugin.EXTERNAL_UPPER) .startObject("fields") - .startObject("f") + .startObject("g") .field("type", "string") .field("store", "yes") .startObject("fields") @@ -107,7 +107,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { refresh(); SearchResponse response = client().prepareSearch("test-idx") - .setQuery(QueryBuilders.termQuery("f.f.raw", "FOO BAR")) + .setQuery(QueryBuilders.termQuery("f.g.raw", "FOO BAR")) .execute().actionGet(); assertThat(response.getHits().totalHits(), equalTo((long) 1)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index 93fd71599c..4efa12fca0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.test.ESSingleNodeTestCase; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index 54e9e96f8a..596efdcc27 100644 --- 
a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -30,17 +30,13 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.isIn; public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase { public void testDefaultConfiguration() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index 3f3c5702e8..f97b22e0ec 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -204,7 +204,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { @Override public Builder<?, ?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { - return new MetadataFieldMapper.Builder<Builder, DummyMetadataFieldMapper>("_dummy", FIELD_TYPE) { + return new MetadataFieldMapper.Builder<Builder, DummyMetadataFieldMapper>("_dummy", FIELD_TYPE, FIELD_TYPE) { @Override public DummyMetadataFieldMapper build(BuilderContext context) { return new DummyMetadataFieldMapper(context.indexSettings()); diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index 1a66879c44..b2faf44e65 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; @@ -39,6 +38,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -59,15 +59,12 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), true, false); // since we are simulating, we should not have the age mapping assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); // now merge, don't simulate - mergeResult = stage1.merge(stage2.mapping(), false, false); - // there is still merge failures - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), false, 
false); // but we have the age in assertThat(stage1.mappers().smartNameFieldMapper("age"), notNullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), notNullValue()); @@ -83,8 +80,7 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping); assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); - MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + mapper.merge(withDynamicMapper.mapping(), false, false); assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); } @@ -99,14 +95,19 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); DocumentMapper nestedMapper = parser.parse(nestedMapping); - MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); - assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested")); + try { + objectMapper.merge(nestedMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from non-nested to nested")); + } - mergeResult = nestedMapper.merge(objectMapper.mapping(), true, false); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); - assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested")); + try { + nestedMapper.merge(objectMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from nested to non-nested")); + } } public void testMergeSearchAnalyzer() throws 
Exception { @@ -122,9 +123,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - MergeResult mergeResult = existing.merge(changed.mapping(), false, false); + existing.merge(changed.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword")); } @@ -141,9 +141,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - MergeResult mergeResult = existing.merge(changed.mapping(), false, false); + existing.merge(changed.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard")); assertThat(((StringFieldMapper) (existing.mappers().getMapper("field"))).getIgnoreAbove(), equalTo(14)); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index a5a073d147..58fa8fd69b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -479,7 +479,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase { .startObject("my_field").field("type", "string").startObject("fields").startObject(MY_MULTI_FIELD) .field("type", "string").startObject("fielddata"); String[] keys = possibleSettings.keySet().toArray(new String[]{}); - Collections.shuffle(Arrays.asList(keys)); 
+ Collections.shuffle(Arrays.asList(keys), random()); for(int i = randomIntBetween(0, possibleSettings.size()-1); i >= 0; --i) builder.field(keys[i], possibleSettings.get(keys[i])); builder.endObject().endObject().endObject().endObject().endObject().endObject().endObject(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java index 30890dcd22..83e10bd826 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java @@ -27,15 +27,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; - import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -62,8 +58,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); DocumentMapper docMapper2 = parser.parse(mapping); - MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper2.mapping(), true, false); docMapper.merge(docMapper2.mapping(), false, false); @@ -84,8 +79,7 @@ public 
class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper3.mapping(), true, false); docMapper.merge(docMapper3.mapping(), false, false); @@ -100,8 +94,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json"); DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper4.mapping(), true, false); docMapper.merge(docMapper4.mapping(), false, false); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java deleted file mode 100644 index 7c1875be55..0000000000 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper.source; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import static org.hamcrest.Matchers.equalTo; - -/** - * - */ -public class CompressSourceMappingTests extends ESSingleNodeTestCase { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - - public void testCompressDisabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", false).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - } - - public void testCompressEnabled() 
throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", true).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } - - public void testCompressThreshold() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress_threshold", "200b").endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - - doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .endObject().bytes()); - - bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } -} diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index 4ec0ff5211..c30ea9bc6c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -31,13 +31,12 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.*; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class DefaultSourceMappingTests extends ESSingleNodeTestCase { @@ -63,51 +62,16 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } - public void testJsonFormat() throws Exception { + public void testFormatBackCompat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").endObject() .endObject().endObject().string(); + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0)) + .build(); - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), 
equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); - } - - public void testJsonFormatCompressedBackcompat() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("format", "json").field("compress", true).endObject() - .endObject().endObject().string(); - - Settings backcompatSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true)); - byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes(); - assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true)); - uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes(); - assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); + DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser(); + parser.parse(mapping); // no exception } public void testIncludes() throws Exception { @@ -228,13 +192,18 @@ public class DefaultSourceMappingTests extends 
ESSingleNodeTestCase { void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... conflicts) throws IOException { DocumentMapper docMapper = parser.parse(mapping1); docMapper = parser.parse(docMapper.mappingSource().string()); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false); - - List<String> expectedConflicts = new ArrayList<>(Arrays.asList(conflicts)); - for (String conflict : mergeResult.buildConflicts()) { - assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict)); + if (conflicts.length == 0) { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + } else { + try { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + for (String conflict : conflicts) { + assertThat(e.getMessage(), containsString(conflict)); + } + } } - assertTrue("missing conflicts: " + Arrays.toString(expectedConflicts.toArray()), expectedConflicts.isEmpty()); } public void testEnabledNotUpdateable() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 9ac039a49f..cadd9dd673 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.StringFieldMapper; @@ 
-493,8 +492,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject() .endObject().endObject().endObject().endObject().string(); - MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false); - assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts()); + defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false); doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index 53a3bf7bb6..d545452db0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; @@ -515,8 +514,7 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject() .endObject().endObject().string(); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false, 
false); - assertThat(mergeResult.buildConflicts().length, equalTo(0)); + docMapper.merge(parser.parse(mapping).mapping(), false, false); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER)); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("array")); } @@ -618,9 +616,9 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .field("index", indexValues.remove(randomInt(2))) .endObject() .endObject().endObject().string(); - DocumentMapperParser parser = createIndex("test", BWC_SETTINGS).mapperService().documentMapperParser(); + MapperService mapperService = createIndex("test", BWC_SETTINGS).mapperService(); - DocumentMapper docMapper = parser.parse(mapping); + mapperService.merge("type", new CompressedXContent(mapping), true, false); mapping = XContentFactory.jsonBuilder().startObject() .startObject("type") .startObject("_timestamp") @@ -628,18 +626,11 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject().string(); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false); - List<String> expectedConflicts = new ArrayList<>(); - expectedConflicts.add("mapper [_timestamp] has different [index] values"); - expectedConflicts.add("mapper [_timestamp] has different [tokenize] values"); - if (indexValues.get(0).equals("not_analyzed") == false) { - // if the only index value left is not_analyzed, then the doc values setting will be the same, but in the - // other two cases, it will change - expectedConflicts.add("mapper [_timestamp] has different [doc_values] values"); - } - - for (String conflict : mergeResult.buildConflicts()) { - assertThat(conflict, isIn(expectedConflicts)); + try { + mapperService.merge("type", new CompressedXContent(mapping), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), 
containsString("mapper [_timestamp] has different [index] values")); } } @@ -686,10 +677,15 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { void assertConflict(String mapping1, String mapping2, DocumentMapperParser parser, String conflict) throws IOException { DocumentMapper docMapper = parser.parse(mapping1); docMapper = parser.parse(docMapper.mappingSource().string()); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false); - assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 0 : 1)); - if (conflict != null) { - assertThat(mergeResult.buildConflicts()[0], containsString(conflict)); + if (conflict == null) { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + } else { + try { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString(conflict)); + } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java index efe0761553..444d692079 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.TTLFieldMapper; @@ -116,9 +115,8 @@ public class TTLMappingTests extends ESSingleNodeTestCase { DocumentMapper mapperWithoutTtl = parser.parse(mappingWithoutTtl); DocumentMapper mapperWithTtl = parser.parse(mappingWithTtl); - 
MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), false, false); + mapperWithoutTtl.merge(mapperWithTtl.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true)); } @@ -141,9 +139,8 @@ public class TTLMappingTests extends ESSingleNodeTestCase { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(updatedMapping); - MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true, false); + initialMapper.merge(updatedMapper.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); } @@ -154,9 +151,13 @@ public class TTLMappingTests extends ESSingleNodeTestCase { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(mappingWithTtlDisabled); - MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true, false); + try { + initialMapper.merge(updatedMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + // expected + } - assertThat(mergeResult.hasConflicts(), equalTo(true)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); } @@ -189,23 +190,20 @@ public class TTLMappingTests extends ESSingleNodeTestCase { public void testNoConflictIfNothingSetAndDisabledLater() throws Exception { IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean(), false); - assertFalse(mergeResult.hasConflicts()); + 
indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean(), false); } public void testNoConflictIfNothingSetAndEnabledLater() throws Exception { IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean(), false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean(), false); } public void testMergeWithOnlyDefaultSet() throws Exception { XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); 
} @@ -216,8 +214,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { CompressedXContent mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterCreation, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false); CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); } @@ -228,8 +225,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtl); CompressedXContent mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d"); - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), 
true).mapping(), true, false); // make sure simulate flag actually worked - no mappings applied CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge)); @@ -240,8 +236,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled(); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge)); @@ -252,8 +247,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), 
true).mapping(), true, false); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge)); @@ -263,8 +257,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { mappingWithoutTtl = getMappingWithTtlDisabled("6d"); indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -273,8 +266,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { // check if switching simulate flag off works if nothing was applied in the beginning indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new 
CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index abf5f4819c..f73ad3e3b3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -77,9 +76,7 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException { IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping); // simulate like in MetaDataMappingService#putMapping - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false, false); - // assure we have no conflicts - assertThat(mergeResult.buildConflicts().length, equalTo(0)); + 
indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false, false); // make sure mappings applied CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string())); @@ -101,9 +98,12 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping); CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true, false); - // assure we have conflicts - assertThat(mergeResult.buildConflicts().length, equalTo(1)); + try { + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + // expected + } // make sure simulate flag actually worked - no mappings applied CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate)); @@ -202,6 +202,51 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { assertNull(mapperService.documentMapper("type2").mapping().root().getMapper("foo")); } + public void testReuseMetaField() throws IOException { + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("_id").field("type", "string").endObject() + .endObject().endObject().endObject(); + MapperService 
mapperService = createIndex("test", Settings.settingsBuilder().build()).mapperService(); + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + } + + public void testReuseMetaFieldBackCompat() throws IOException { + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("_id").field("type", "string").endObject() + .endObject().endObject().endObject(); + // the logic is different for 2.x indices since they record some meta mappers (including _id) + // in the root object + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_1_0).build(); + MapperService mapperService = createIndex("test", settings).mapperService(); + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + + try { + mapperService.merge("type", new CompressedXContent(mapping.string()), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]")); + } + } + public void testIndexFieldParsingBackcompat() throws IOException { IndexService indexService = createIndex("test", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build()); XContentBuilder indexMapping = XContentFactory.jsonBuilder(); diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java 
b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java index aa97d72273..2d250ff0b9 100644 --- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java @@ -47,6 +47,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.ModulesBuilder; @@ -191,6 +192,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>> // skip services bindQueryParsersExtension(); bindMapperExtension(); + bind(ShapeBuilderRegistry.class).asEagerSingleton(); } }, new ScriptModule(settings) { diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 6b2088d25c..0720303940 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -59,14 +59,11 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue protected GeoShapeQueryBuilder doCreateTestQueryBuilder() { ShapeType shapeType = ShapeType.randomType(getRandom()); ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(getRandom(), null, shapeType); + GeoShapeQueryBuilder builder; clearShapeFields(); if (randomBoolean()) { - try { - builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); - } catch (IOException e) { - throw new RuntimeException(e); - } + builder = new GeoShapeQueryBuilder(GEO_SHAPE_FIELD_NAME, shape); } else { indexedShapeToReturn = 
shape; indexedShapeId = randomAsciiOfLengthBetween(3, 20); @@ -225,17 +222,17 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue public void testFromJson() throws IOException { String json = - "{\n" + - " \"geo_shape\" : {\n" + - " \"location\" : {\n" + - " \"shape\" : {\n" + - " \"type\" : \"envelope\",\n" + - " \"coordinates\" : [ [ 13.0, 53.0 ], [ 14.0, 52.0 ] ]\n" + - " },\n" + - " \"relation\" : \"intersects\"\n" + - " },\n" + - " \"boost\" : 42.0\n" + - " }\n" + + "{\n" + + " \"geo_shape\" : {\n" + + " \"location\" : {\n" + + " \"shape\" : {\n" + + " \"type\" : \"envelope\",\n" + + " \"coordinates\" : [ [ 13.0, 53.0 ], [ 14.0, 52.0 ] ]\n" + + " },\n" + + " \"relation\" : \"intersects\"\n" + + " },\n" + + " \"boost\" : 42.0\n" + + " }\n" + "}"; GeoShapeQueryBuilder parsed = (GeoShapeQueryBuilder) parseQuery(json); checkGeneratedJson(json, parsed); diff --git a/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java deleted file mode 100644 index 253509b2fc..0000000000 --- a/core/src/test/java/org/elasticsearch/index/query/MissingQueryBuilderTests.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.Query; - -import java.io.IOException; - -import static org.hamcrest.Matchers.containsString; - -public class MissingQueryBuilderTests extends AbstractQueryTestCase<MissingQueryBuilder> { - - @Override - protected MissingQueryBuilder doCreateTestQueryBuilder() { - String fieldName = randomBoolean() ? randomFrom(MAPPED_FIELD_NAMES) : randomAsciiOfLengthBetween(1, 10); - Boolean existence = randomBoolean(); - Boolean nullValue = randomBoolean(); - if (existence == false && nullValue == false) { - if (randomBoolean()) { - existence = true; - } else { - nullValue = true; - } - } - return new MissingQueryBuilder(fieldName, nullValue, existence); - } - - @Override - protected void doAssertLuceneQuery(MissingQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException { - // too many mapping dependent cases to test, we don't want to end up - // duplication the toQuery method - } - - public void testIllegalArguments() { - try { - if (randomBoolean()) { - new MissingQueryBuilder("", true, true); - } else { - new MissingQueryBuilder(null, true, true); - } - fail("must not be null or empty"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new MissingQueryBuilder("fieldname", false, false); - fail("existence and nullValue cannot both be false"); - } catch (IllegalArgumentException e) { - // expected - } - - try { - new MissingQueryBuilder("fieldname", MissingQueryBuilder.DEFAULT_NULL_VALUE, false); - fail("existence and nullValue cannot both be false"); - } catch (IllegalArgumentException e) { - // expected - } - } - - public void testBothNullValueAndExistenceFalse() throws IOException { - QueryShardContext context = createShardContext(); - context.setAllowUnmappedFields(true); - try { - MissingQueryBuilder.newFilter(context, "field", false, 
false); - fail("Expected QueryShardException"); - } catch (QueryShardException e) { - assertThat(e.getMessage(), containsString("missing must have either existence, or null_value")); - } - } - - public void testFromJson() throws IOException { - String json = - "{\n" + - " \"missing\" : {\n" + - " \"field\" : \"user\",\n" + - " \"null_value\" : false,\n" + - " \"existence\" : true,\n" + - " \"boost\" : 1.0\n" + - " }\n" + - "}"; - - MissingQueryBuilder parsed = (MissingQueryBuilder) parseQuery(json); - checkGeneratedJson(json, parsed); - - assertEquals(json, false, parsed.nullValue()); - assertEquals(json, true, parsed.existence()); - assertEquals(json, "user", parsed.fieldPattern()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java index 7ba1d11815..028987448f 100644 --- a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.indicesQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; @@ -240,10 +239,6 @@ public class QueryDSLDocumentationTests extends ESTestCase { matchQuery("name", "kimchy elasticsearch"); } - public void testMissing() { - missingQuery("user", true, true); - } - public void testMLT() { String[] fields = {"name.first", "name.last"}; String[] texts = {"text like this one"}; 
diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index bf580e55f4..4df799e9f3 100644 --- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -23,7 +23,9 @@ import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.lucene.BytesRefs; +import org.hamcrest.core.IsEqual; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -353,4 +355,42 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase<RangeQueryBuil assertEquals(json, "2015-01-01 00:00:00", parsed.from()); assertEquals(json, "now", parsed.to()); } + + public void testNamedQueryParsing() throws IOException { + String json = + "{\n" + + " \"range\" : {\n" + + " \"timestamp\" : {\n" + + " \"from\" : \"2015-01-01 00:00:00\",\n" + + " \"to\" : \"now\",\n" + + " \"boost\" : 1.0,\n" + + " \"_name\" : \"my_range\"\n" + + " }\n" + + " }\n" + + "}"; + assertNotNull(parseQuery(json)); + + json = + "{\n" + + " \"range\" : {\n" + + " \"timestamp\" : {\n" + + " \"from\" : \"2015-01-01 00:00:00\",\n" + + " \"to\" : \"now\",\n" + + " \"boost\" : 1.0\n" + + " },\n" + + " \"_name\" : \"my_range\"\n" + + " }\n" + + "}"; + + // non strict parsing should accept "_name" on top level + assertNotNull(parseQuery(json, ParseFieldMatcher.EMPTY)); + + // with strict parsing, ParseField will throw exception + try { + parseQuery(json, ParseFieldMatcher.STRICT); + fail("Strict parsing should trigger exception for '_name' on top level"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("Deprecated field [_name] used, replaced by [query name is 
not supported in short version of range query]")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b5f33afa94..8dce7f1f95 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -133,7 +133,7 @@ public class IndexShardTests extends ESSingleNodeTestCase { ShardId id = new ShardId("foo", 1); long version = between(1, Integer.MAX_VALUE / 2); boolean primary = randomBoolean(); - AllocationId allocationId = randomAllocationId(); + AllocationId allocationId = randomBoolean() ? null : randomAllocationId(); ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "foo", allocationId); write(state1, env.availableShardPaths(id)); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(id)); @@ -288,7 +288,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { } public void testShardStateMetaHashCodeEquals() { - ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId()); + AllocationId allocationId = randomBoolean() ? 
null : randomAllocationId(); + ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); assertEquals(meta, new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID, meta.allocationId)); assertEquals(meta.hashCode(), new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID, meta.allocationId).hashCode()); @@ -299,7 +300,8 @@ public class IndexShardTests extends ESSingleNodeTestCase { assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID + "foo", randomAllocationId()))); Set<Integer> hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode - meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId()); + allocationId = randomBoolean() ? null : randomAllocationId(); + meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 4858e0a6e3..5a1aa2ef46 100644 --- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -19,26 +19,21 @@ package org.elasticsearch.index.store; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexFileNames; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import 
org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Nullable; @@ -48,7 +43,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.shard.*; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -75,14 +69,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import 
java.util.Set; -import java.util.TreeSet; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -91,16 +78,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public class CorruptedFileIT extends ESIntegTestCase { @@ -111,9 +90,9 @@ public class CorruptedFileIT extends ESIntegTestCase { // and we need to make sure primaries are not just trashed if we don't have replicas .put(super.nodeSettings(nodeOrdinal)) // speed up recoveries - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, 10) - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 10) - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5) + .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), 10) + .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 5) 
.build(); } @@ -320,7 +299,7 @@ public class CorruptedFileIT extends ESIntegTestCase { } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); NodeStats primariesNode = dataNodeStats.get(0); NodeStats unluckyNode = dataNodeStats.get(1); assertAcked(prepareCreate("test").setSettings(Settings.builder() @@ -380,7 +359,7 @@ public class CorruptedFileIT extends ESIntegTestCase { } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); NodeStats primariesNode = dataNodeStats.get(0); NodeStats unluckyNode = dataNodeStats.get(1); diff --git a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java index aab980e975..a29cc6cf8d 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java @@ -34,13 +34,12 @@ import java.nio.file.Path; public class BufferedTranslogTests extends TranslogTests { @Override - protected Translog create(Path path) throws IOException { + protected TranslogConfig getTranslogConfig(Path path) { Settings build = Settings.settingsBuilder() .put("index.translog.fs.type", TranslogWriter.Type.BUFFERED.name()) .put("index.translog.fs.buffer_size", 10 + randomInt(128 * 1024), ByteSizeUnit.BYTES) .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .build(); - TranslogConfig translogConfig = new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); - return new Translog(translogConfig); + return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), 
Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); } } diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 26faa02a17..8b3294c15b 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -22,9 +22,12 @@ package org.elasticsearch.index.translog; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.Term; +import org.apache.lucene.mockfile.FilterFileChannel; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.LineFileDocs; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -110,16 +113,19 @@ public class TranslogTests extends ESTestCase { } } - protected Translog create(Path path) throws IOException { + private Translog create(Path path) throws IOException { + return new Translog(getTranslogConfig(path)); + } + + protected TranslogConfig getTranslogConfig(Path path) { Settings build = Settings.settingsBuilder() .put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.SIMPLE.name()) .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) .build(); - TranslogConfig translogConfig = new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null); - return new Translog(translogConfig); + return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, 
null); } - protected void addToTranslogAndList(Translog translog, ArrayList<Translog.Operation> list, Translog.Operation op) { + protected void addToTranslogAndList(Translog translog, ArrayList<Translog.Operation> list, Translog.Operation op) throws IOException { list.add(op); translog.add(op); } @@ -330,7 +336,7 @@ public class TranslogTests extends ESTestCase { } } - public void testSnapshot() { + public void testSnapshot() throws IOException { ArrayList<Translog.Operation> ops = new ArrayList<>(); Translog.Snapshot snapshot = translog.newSnapshot(); assertThat(snapshot, SnapshotMatchers.size(0)); @@ -389,7 +395,7 @@ public class TranslogTests extends ESTestCase { Translog.Snapshot snapshot = translog.newSnapshot(); fail("translog is closed"); } catch (AlreadyClosedException ex) { - assertThat(ex.getMessage(), containsString("translog-1.tlog is already closed can't increment")); + assertEquals(ex.getMessage(), "translog is already closed"); } } @@ -634,7 +640,7 @@ public class TranslogTests extends ESTestCase { final String threadId = "writer_" + i; writers[i] = new Thread(new AbstractRunnable() { @Override - public void doRun() throws BrokenBarrierException, InterruptedException { + public void doRun() throws BrokenBarrierException, InterruptedException, IOException { barrier.await(); int counter = 0; while (run.get()) { @@ -1237,11 +1243,11 @@ public class TranslogTests extends ESTestCase { private final CountDownLatch downLatch; private final int opsPerThread; private final int threadId; - private final BlockingQueue<LocationOperation> writtenOperations; + private final Collection<LocationOperation> writtenOperations; private final Throwable[] threadExceptions; private final Translog translog; - public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, BlockingQueue<LocationOperation> writtenOperations, Throwable[] threadExceptions) { + public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, 
int threadId, Collection<LocationOperation> writtenOperations, Throwable[] threadExceptions) { this.translog = translog; this.downLatch = downLatch; this.opsPerThread = opsPerThread; @@ -1271,12 +1277,302 @@ public class TranslogTests extends ESTestCase { throw new ElasticsearchException("not supported op type"); } - Translog.Location loc = translog.add(op); + Translog.Location loc = add(op); writtenOperations.add(new LocationOperation(op, loc)); + afterAdd(); } } catch (Throwable t) { threadExceptions[threadId] = t; } } + + protected Translog.Location add(Translog.Operation op) throws IOException { + return translog.add(op); + } + + protected void afterAdd() throws IOException {} + } + + public void testFailFlush() throws IOException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(); + TranslogConfig config = getTranslogConfig(tempDir); + Translog translog = getFailableTranslog(fail, config); + + List<Translog.Location> locations = new ArrayList<>(); + int opsSynced = 0; + boolean failed = false; + while(failed == false) { + try { + locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + translog.sync(); + opsSynced++; + } catch (MockDirectoryWrapper.FakeIOException ex) { + failed = true; + assertFalse(translog.isOpen()); + } catch (IOException ex) { + failed = true; + assertFalse(translog.isOpen()); + assertEquals("__FAKE__ no space left on device", ex.getMessage()); + } + fail.set(randomBoolean()); + } + fail.set(false); + if (randomBoolean()) { + try { + locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + fail("we are already closed"); + } catch (AlreadyClosedException ex) { + assertNotNull(ex.getCause()); + if (ex.getCause() instanceof MockDirectoryWrapper.FakeIOException) { + assertNull(ex.getCause().getMessage()); + } else { + 
assertEquals(ex.getCause().getMessage(), "__FAKE__ no space left on device"); + } + } + + } + Translog.TranslogGeneration translogGeneration = translog.getGeneration(); + try { + translog.newSnapshot(); + fail("already closed"); + } catch (AlreadyClosedException ex) { + // all is well + assertNotNull(ex.getCause()); + assertSame(translog.getTragicException(), ex.getCause()); + } + + try { + translog.commit(); + fail("already closed"); + } catch (AlreadyClosedException ex) { + assertNotNull(ex.getCause()); + assertSame(translog.getTragicException(), ex.getCause()); + } + + assertFalse(translog.isOpen()); + translog.close(); // we are closed + config.setTranslogGeneration(translogGeneration); + try (Translog tlog = new Translog(config)){ + assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); + assertFalse(tlog.syncNeeded()); + + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + assertEquals(opsSynced, snapshot.estimatedTotalOperations()); + for (int i = 0; i < opsSynced; i++) { + assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, locations.get(i).generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals(i, Integer.parseInt(next.getSource().source.toUtf8())); + } + } + } + } + + public void testTranslogOpsCountIsCorrect() throws IOException { + List<Translog.Location> locations = new ArrayList<>(); + int numOps = randomIntBetween(100, 200); + LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly + for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { + locations.add(translog.add(new Translog.Index("test", "" + opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + 
assertEquals(opsAdded+1, snapshot.estimatedTotalOperations()); + for (int i = 0; i < opsAdded; i++) { + assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + } + } + } + } + + public void testTragicEventCanBeAnyException() throws IOException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(); + TranslogConfig config = getTranslogConfig(tempDir); + assumeFalse("this won't work if we sync on any op",config.isSyncOnEachOperation()); + Translog translog = getFailableTranslog(fail, config, false, true); + LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly + translog.add(new Translog.Index("test", "1", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + fail.set(true); + try { + Translog.Location location = translog.add(new Translog.Index("test", "2", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + if (config.getType() == TranslogWriter.Type.BUFFERED) { // the buffered case will fail on the add if we exceed the buffer or will fail on the flush once we sync + if (randomBoolean()) { + translog.ensureSynced(location); + } else { + translog.sync(); + } + } + //TODO once we have a mock FS that can simulate we can also fail on plain sync + fail("WTF"); + } catch (UnknownException ex) { + // w00t + } catch (TranslogException ex) { + assertTrue(ex.getCause() instanceof UnknownException); + } + assertFalse(translog.isOpen()); + assertTrue(translog.getTragicException() instanceof UnknownException); + } + + public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(false); + + TranslogConfig config = 
getTranslogConfig(tempDir); + Translog translog = getFailableTranslog(fail, config); + + final int threadCount = randomIntBetween(1, 5); + Thread[] threads = new Thread[threadCount]; + final Throwable[] threadExceptions = new Throwable[threadCount]; + final CountDownLatch downLatch = new CountDownLatch(1); + final CountDownLatch added = new CountDownLatch(randomIntBetween(10, 100)); + List<LocationOperation> writtenOperations = Collections.synchronizedList(new ArrayList<>()); + for (int i = 0; i < threadCount; i++) { + final int threadId = i; + threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, threadExceptions) { + @Override + protected Translog.Location add(Translog.Operation op) throws IOException { + Translog.Location add = super.add(op); + added.countDown(); + return add; + } + + @Override + protected void afterAdd() throws IOException { + if (randomBoolean()) { + translog.sync(); + } + } + }; + threads[i].setDaemon(true); + threads[i].start(); + } + downLatch.countDown(); + added.await(); + try (Translog.View view = translog.newView()) { + // this holds a reference to the current tlog channel such that it's not closed + // if we hit a tragic event. this is important to ensure that asserts inside the Translog#add doesn't trip + // otherwise our assertions here are off by one sometimes. 
+ fail.set(true); + for (int i = 0; i < threadCount; i++) { + threads[i].join(); + } + boolean atLeastOneFailed = false; + for (Throwable ex : threadExceptions) { + assertTrue(ex.toString(), ex instanceof IOException || ex instanceof AlreadyClosedException); + if (ex != null) { + atLeastOneFailed = true; + } + } + if (atLeastOneFailed == false) { + try { + boolean syncNeeded = translog.syncNeeded(); + translog.close(); + assertFalse("should have failed if sync was needed", syncNeeded); + } catch (IOException ex) { + // boom now we failed + } + } + Collections.sort(writtenOperations, (a, b) -> a.location.compareTo(b.location)); + assertFalse(translog.isOpen()); + final Checkpoint checkpoint = Checkpoint.read(config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME)); + Iterator<LocationOperation> iterator = writtenOperations.iterator(); + while (iterator.hasNext()) { + LocationOperation next = iterator.next(); + if (checkpoint.offset < (next.location.translogLocation + next.location.size)) { + // drop all that haven't been synced + iterator.remove(); + } + } + config.setTranslogGeneration(translog.getGeneration()); + try (Translog tlog = new Translog(config)) { + try (Translog.Snapshot snapshot = tlog.newSnapshot()) { + if (writtenOperations.size() != snapshot.estimatedTotalOperations()) { + for (int i = 0; i < threadCount; i++) { + if (threadExceptions[i] != null) + threadExceptions[i].printStackTrace(); + } + } + assertEquals(writtenOperations.size(), snapshot.estimatedTotalOperations()); + for (int i = 0; i < writtenOperations.size(); i++) { + assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + assertEquals(next, writtenOperations.get(i).operation); + } + } + } + } + } + private Translog getFailableTranslog(final AtomicBoolean fail, 
final TranslogConfig config) throws IOException { + return getFailableTranslog(fail, config, randomBoolean(), false); + } + + private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config, final boolean paritalWrites, final boolean throwUnknownException) throws IOException { + return new Translog(config) { + @Override + TranslogWriter.ChannelFactory getChannelFactory() { + final TranslogWriter.ChannelFactory factory = super.getChannelFactory(); + + return new TranslogWriter.ChannelFactory() { + @Override + public FileChannel open(Path file) throws IOException { + FileChannel channel = factory.open(file); + return new ThrowingFileChannel(fail, paritalWrites, throwUnknownException, channel); + } + }; + } + }; + } + + public static class ThrowingFileChannel extends FilterFileChannel { + private final AtomicBoolean fail; + private final boolean partialWrite; + private final boolean throwUnknownException; + + public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) { + super(delegate); + this.fail = fail; + this.partialWrite = partialWrite; + this.throwUnknownException = throwUnknownException; + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public int write(ByteBuffer src, long position) throws IOException { + throw new UnsupportedOperationException(); + } + + + public int write(ByteBuffer src) throws IOException { + if (fail.get()) { + if (partialWrite) { + if (src.hasRemaining()) { + final int pos = src.position(); + final int limit = src.limit(); + src.limit(randomIntBetween(pos, limit)); + super.write(src); + src.limit(limit); + src.position(pos); + throw new IOException("__FAKE__ no space left on device"); + } + } + if (throwUnknownException) { + throw new UnknownException(); + } else { + throw new MockDirectoryWrapper.FakeIOException(); + } + } + return 
super.write(src); + } + } + + private static final class UnknownException extends RuntimeException { + } } diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index 8de3af2582..4f6aaf2570 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -170,14 +170,14 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase { //add a node: 3 out of the 6 shards will be relocated to it //disable allocation before starting a new node, as we need to register the listener first assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); String node2 = internalCluster().startNode(); IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener(); //add a listener that keeps track of the shard state changes internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node2).setNewDelegate(stateChangeListenerNode2); //re-enable allocation assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); ensureGreen(); //the 3 relocated shards get closed on the first node diff --git a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index aa8c9f18c0..9ff0df4d39 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ 
b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; @@ -99,7 +100,7 @@ public class FlushIT extends ESIntegTestCase { result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId("test", 0)); } else { logger.info("--> sync flushing index [test]"); - IndicesSyncedFlushResult indicesResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test"); + SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); result = indicesResult.getShardsResultPerIndex().get("test").get(0); } assertFalse(result.failed()); @@ -171,7 +172,7 @@ public class FlushIT extends ESIntegTestCase { assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); } logger.info("--> trying sync flush"); - IndicesSyncedFlushResult syncedFlushResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test"); + SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get(); logger.info("--> sync flush done"); stop.set(true); indexingThread.join(); @@ -191,7 +192,7 @@ public class FlushIT extends ESIntegTestCase { for (final ShardStats shardStats : shardsStats) { for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) { if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) { - for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> singleResponse : shardResult.shardResponses().entrySet()) { + for (Map.Entry<ShardRouting, 
SyncedFlushService.ShardSyncedFlushResponse> singleResponse : shardResult.shardResponses().entrySet()) { if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) { if (singleResponse.getValue().success()) { logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId()); @@ -212,7 +213,7 @@ public class FlushIT extends ESIntegTestCase { prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.include._name", "nonexistent")).get(); // this should not hang but instead immediately return with empty result set - List<ShardsSyncedFlushResult> shardsResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").getShardsResultPerIndex().get("test"); + List<ShardsSyncedFlushResult> shardsResult = client().admin().indices().prepareSyncedFlush("test").get().getShardsResultPerIndex().get("test"); // just to make sure the test actually tests the right thing int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1); assertThat(shardsResult.size(), equalTo(numShards)); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index 1a4bf8fd3f..e4c9cb8a7e 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -98,7 +98,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { assertNotNull(syncedFlushResult); assertEquals(1, syncedFlushResult.successfulShards()); assertEquals(1, syncedFlushResult.totalShards()); - SyncedFlushService.SyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next(); + SyncedFlushService.ShardSyncedFlushResponse response = 
syncedFlushResult.shardResponses().values().iterator().next(); assertTrue(response.success()); } @@ -157,7 +157,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { assertNull(listener.result); assertEquals("no such index", listener.error.getMessage()); } - + public void testFailAfterIntermediateCommit() throws InterruptedException { createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index fef6c23231..fdabaf6b5a 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; @@ -31,6 +32,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.elasticsearch.test.ESTestCase.randomBoolean; + /** Utils for SyncedFlush */ public class SyncedFlushUtil { @@ -39,25 +43,6 @@ public class SyncedFlushUtil { } /** - * Blocking single index version of {@link SyncedFlushService#attemptSyncedFlush(String[], IndicesOptions, ActionListener)} - */ - public static IndicesSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, String index) { - SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - LatchedListener<IndicesSyncedFlushResult> listener = new LatchedListener(); - service.attemptSyncedFlush(new String[]{index}, 
IndicesOptions.lenientExpandOpen(), listener); - try { - listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; - } - - - /** * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ public static ShardsSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, ShardId shardId) { @@ -109,5 +94,4 @@ public class SyncedFlushUtil { } return listener.result; } - } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 3398839b90..b1b56acd8c 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -41,12 +41,12 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + 
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java index fcd94d9958..1af04e295d 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java @@ -63,13 +63,13 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { private void reset() { logger.info("--> resetting breaker settings"); Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_BREAKER_LIMIT) - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, - HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_OVERHEAD_CONSTANT) - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, - HierarchyCircuitBreakerService.DEFAULT_REQUEST_BREAKER_LIMIT) - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefault(null)) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getDefault(null)) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getDefault(null)) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); } @@ -119,8 
+119,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Update circuit breaker settings Settings settings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -168,8 +168,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Update circuit breaker settings Settings settings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -213,8 +213,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { .getNodes()[0].getBreaker().getStats(CircuitBreaker.REQUEST).getLimit(); Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); @@ -234,9 +234,9 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase 
{ // Adjust settings so the parent breaker will fail, but the fielddata breaker doesn't resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "15b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "90%") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "15b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "90%") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet(); @@ -261,7 +261,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Make request breaker limited to a small amount Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java index 741ea30525..212d7ecbb7 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.indices.memory.breaker; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.breaker.BreakerSettings; import 
org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; @@ -66,7 +66,7 @@ public class CircuitBreakerUnitTests extends ESTestCase { } public void testRegisterCustomBreaker() throws Exception { - CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)); + CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); String customName = "custom"; BreakerSettings settings = new BreakerSettings(customName, 20, 1.0); service.registerBreaker(settings); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4cf60289a1..2b8f5ea638 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -145,7 +145,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() // one chunk per sec.. 
- .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES) + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) ) .get().isAcknowledged()); } @@ -156,7 +156,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { } assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb") + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb") ) .get().isAcknowledged()); } @@ -529,8 +529,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { public void testDisconnectsWhileRecovering() throws Exception { final String indexName = "test"; final Settings nodeSettings = Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, "100ms") - .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s") + .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms") + .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s") .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again .build(); // start a master node diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 8346003287..75a2b14060 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; import 
org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.settings.Settings; @@ -44,7 +45,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; @@ -60,7 +60,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public class RecoverySourceHandlerTests extends ESTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings(new Index("index"), Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build()); private final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1); - private final NodeSettingsService service = new NodeSettingsService(Settings.EMPTY); + private final ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); public void testSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). 
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java index fea8f0fdf1..8b23354ebb 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java @@ -26,13 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryState.File; -import org.elasticsearch.indices.recovery.RecoveryState.Index; -import org.elasticsearch.indices.recovery.RecoveryState.Stage; -import org.elasticsearch.indices.recovery.RecoveryState.Timer; -import org.elasticsearch.indices.recovery.RecoveryState.Translog; -import org.elasticsearch.indices.recovery.RecoveryState.Type; -import org.elasticsearch.indices.recovery.RecoveryState.VerifyIndex; +import org.elasticsearch.indices.recovery.RecoveryState.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -43,14 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.*; public class RecoveryStateTests extends ESTestCase { abstract class Streamer<T extends Streamable> extends Thread { @@ -201,7 +188,7 @@ public class 
RecoveryStateTests extends ESTestCase { } } - Collections.shuffle(Arrays.asList(files)); + Collections.shuffle(Arrays.asList(files), random()); final RecoveryState.Index index = new RecoveryState.Index(); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index bde40aa928..8ec629dbbd 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -40,7 +40,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { // The cluster scope is test b/c we can't clear cluster settings. public void testCloseAllRequiresName() { Settings clusterSettings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings)); createIndex("test1", "test2", "test3"); @@ -91,7 +91,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { createIndex("test_no_close"); healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, false)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false)).get(); try { client.admin().indices().prepareClose("test_no_close").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java 
b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 96611aeca8..2e73a46667 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -171,7 +171,7 @@ public class RareClusterStateIT extends ESIntegTestCase { ensureGreen("test"); // now that the cluster is stable, remove publishing timeout - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0"))); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0"))); Set<String> nodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); nodes.remove(internalCluster().getMasterName()); @@ -200,7 +200,7 @@ public class RareClusterStateIT extends ESIntegTestCase { // but the change might not be on the node that performed the indexing // operation yet - Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0ms").build(); + Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0ms").build(); final List<String> nodeNames = internalCluster().startNodesAsync(2, settings).get(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index a5cfa81645..af9cfeb94c 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -97,7 +97,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase { client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); } - public void 
testFastCloseAfterCreateDoesNotClose() { + public void testFastCloseAfterCreateContinuesCreateAfterOpen() { logger.info("--> creating test index that cannot be allocated"); client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder() .put("index.routing.allocation.include.tag", "no_such_node").build()).get(); @@ -106,17 +106,14 @@ public class SimpleIndexStateIT extends ESIntegTestCase { assertThat(health.isTimedOut(), equalTo(false)); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED)); - try { - client().admin().indices().prepareClose("test").get(); - fail("Exception should have been thrown"); - } catch(IndexPrimaryShardNotAllocatedException e) { - // expected - } + client().admin().indices().prepareClose("test").get(); logger.info("--> updating test index settings to allow allocation"); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.settingsBuilder() .put("index.routing.allocation.include.tag", "").build()).get(); + client().admin().indices().prepareOpen("test").get(); + logger.info("--> waiting for green status"); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a87da6fc04..4bdd972ea9 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -23,8 +23,8 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndexStats; 
import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -46,11 +46,12 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import java.util.EnumSet; @@ -315,7 +316,7 @@ public class IndexStatsIT extends ESIntegTestCase { .put(MergeSchedulerConfig.MAX_THREAD_COUNT, "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT, "1") .put("index.merge.policy.type", "tiered") - + .put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, "ASYNC") )); ensureGreen(); long termUpto = 0; diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index fc4dd4f648..948b76b963 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -303,7 +303,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable allocation to control the situation more easily assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> shutting down two random nodes"); 
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); @@ -322,7 +322,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .put(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "_name", "NONE"))); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); logger.debug("--> waiting for shards to recover on [{}]", node4); // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the @@ -340,7 +340,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable allocation again to control concurrency a bit and allow shard active to kick in before allocation assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> starting the two old nodes back"); @@ -351,7 +351,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); logger.debug("--> waiting for the lost shard to be recovered"); @@ -396,7 +396,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable relocations when we do this, to make sure the shards are not relocated from node2 // due to rebalancing, and delete its content - 
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index ca2025ced1..b32cfef76b 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -560,7 +560,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addAlias(new Alias("alias1")) .addAlias(new Alias("{index}-alias")) - .addAlias(new Alias("alias3").filter(QueryBuilders.missingQuery("test"))) + .addAlias(new Alias("alias3").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("test")))) .addAlias(new Alias("alias4")).get(); client().admin().indices().preparePutTemplate("template2") diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java new file mode 100644 index 0000000000..693ba4a2eb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nodesinfo; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.monitor.os.DummyOsInfo; +import org.elasticsearch.monitor.os.OsInfo; +import org.elasticsearch.monitor.process.ProcessInfo; +import org.elasticsearch.plugins.DummyPluginInfo; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPoolInfo; +import 
org.elasticsearch.transport.TransportInfo; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.core.IsEqual.equalTo; + +/** + * + */ +public class NodeInfoStreamingTests extends ESTestCase { + + public void testNodeInfoStreaming() throws IOException { + NodeInfo nodeInfo = createNodeInfo(); + Version version = Version.CURRENT; + BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(version); + nodeInfo.writeTo(out); + out.close(); + StreamInput in = StreamInput.wrap(out.bytes()); + in.setVersion(version); + NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in); + assertExpectedUnchanged(nodeInfo, readNodeInfo); + + } + // checks all properties that are expected to be unchanged. Once we start changing them between versions this method has to be changed as well + private void assertExpectedUnchanged(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { + assertThat(nodeInfo.getBuild().toString(), equalTo(readNodeInfo.getBuild().toString())); + assertThat(nodeInfo.getHostname(), equalTo(readNodeInfo.getHostname())); + assertThat(nodeInfo.getVersion(), equalTo(readNodeInfo.getVersion())); + assertThat(nodeInfo.getServiceAttributes().size(), equalTo(readNodeInfo.getServiceAttributes().size())); + for (Map.Entry<String, String> entry : nodeInfo.getServiceAttributes().entrySet()) { + assertNotNull(readNodeInfo.getServiceAttributes().get(entry.getKey())); + assertThat(readNodeInfo.getServiceAttributes().get(entry.getKey()), equalTo(entry.getValue())); + } + compareJsonOutput(nodeInfo.getHttp(), readNodeInfo.getHttp()); + compareJsonOutput(nodeInfo.getJvm(), readNodeInfo.getJvm()); + compareJsonOutput(nodeInfo.getProcess(), readNodeInfo.getProcess()); + compareJsonOutput(nodeInfo.getSettings(), readNodeInfo.getSettings()); + compareJsonOutput(nodeInfo.getThreadPool(), 
readNodeInfo.getThreadPool()); + compareJsonOutput(nodeInfo.getTransport(), readNodeInfo.getTransport()); + compareJsonOutput(nodeInfo.getNode(), readNodeInfo.getNode()); + compareJsonOutput(nodeInfo.getOs(), readNodeInfo.getOs()); + comparePluginsAndModules(nodeInfo, readNodeInfo); + } + + private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder pluginsAndModules = jsonBuilder(); + pluginsAndModules.startObject(); + nodeInfo.getPlugins().toXContent(pluginsAndModules, params); + pluginsAndModules.endObject(); + XContentBuilder readPluginsAndModules = jsonBuilder(); + readPluginsAndModules.startObject(); + readNodeInfo.getPlugins().toXContent(readPluginsAndModules, params); + readPluginsAndModules.endObject(); + assertThat(pluginsAndModules.string(), equalTo(readPluginsAndModules.string())); + } + + private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder param1Builder = jsonBuilder(); + XContentBuilder param2Builder = jsonBuilder(); + param1.toXContent(param1Builder, params); + param2.toXContent(param2Builder, params); + assertThat(param1Builder.string(), equalTo(param2Builder.string())); + } + + + private NodeInfo createNodeInfo() { + Build build = Build.CURRENT; + DiscoveryNode node = new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, VersionUtils.randomVersion(random())); + Map<String, String> serviceAttributes = new HashMap<>(); + serviceAttributes.put("test", "attribute"); + Settings settings = Settings.builder().put("test", "setting").build(); + OsInfo osInfo = DummyOsInfo.INSTANCE; + ProcessInfo process = new ProcessInfo(randomInt(), randomBoolean()); + JvmInfo jvm = JvmInfo.jvmInfo(); + List<ThreadPool.Info> threadPoolInfos = new ArrayList<>(); + threadPoolInfos.add(new ThreadPool.Info("test_threadpool", 
ThreadPool.ThreadPoolType.FIXED, 5)); + ThreadPoolInfo threadPoolInfo = new ThreadPoolInfo(threadPoolInfos); + Map<String, BoundTransportAddress> profileAddresses = new HashMap<>(); + BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{DummyTransportAddress.INSTANCE}, DummyTransportAddress.INSTANCE); + profileAddresses.put("test_address", dummyBoundTransportAddress); + TransportInfo transport = new TransportInfo(dummyBoundTransportAddress, profileAddresses); + HttpInfo htttpInfo = new HttpInfo(dummyBoundTransportAddress, randomLong()); + PluginsAndModules plugins = new PluginsAndModules(); + plugins.addModule(DummyPluginInfo.INSTANCE); + plugins.addPlugin(DummyPluginInfo.INSTANCE); + return new NodeInfo(VersionUtils.randomVersion(random()), build, node, serviceAttributes, settings, osInfo, process, jvm, threadPoolInfo, transport, htttpInfo, plugins); + } +} diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java index 49d22b87bf..514b1757e4 100644 --- a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java @@ -34,7 +34,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { // The cluster scope is test b/c we can't clear cluster settings. 
public void testDestructiveOperations() throws Exception { Settings settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -58,7 +58,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { } settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -68,7 +68,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { // end delete index: // close index: settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -100,7 +100,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { } settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); assertAcked(client().admin().indices().prepareClose("_all").get()); diff --git a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java deleted file mode 100644 index 7831b7ca99..0000000000 --- a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.plugins; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.transport.AssertingLocalTransport; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; - -import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; - -/** - * - */ -@ClusterScope(scope = Scope.SUITE, 
numDataNodes = 2) -public class PluggableTransportModuleIT extends ESIntegTestCase { - public static final AtomicInteger SENT_REQUEST_COUNTER = new AtomicInteger(0); - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "local") - .build(); - } - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return pluginList(CountingSentRequestsPlugin.class); - } - - @Override - protected Collection<Class<? extends Plugin>> transportClientPlugins() { - return pluginList(CountingSentRequestsPlugin.class); - } - - public void testThatPluginFunctionalityIsLoadedWithoutConfiguration() throws Exception { - for (Transport transport : internalCluster().getInstances(Transport.class)) { - assertThat(transport, instanceOf(CountingAssertingLocalTransport.class)); - } - - int countBeforeRequest = SENT_REQUEST_COUNTER.get(); - internalCluster().clientNodeClient().admin().cluster().prepareHealth().get(); - int countAfterRequest = SENT_REQUEST_COUNTER.get(); - assertThat("Expected send request counter to be greather than zero", countAfterRequest, is(greaterThan(countBeforeRequest))); - } - - public static class CountingSentRequestsPlugin extends Plugin { - @Override - public String name() { - return "counting-pipelines-plugin"; - } - - @Override - public String description() { - return "counting-pipelines-plugin"; - } - - public void onModule(TransportModule transportModule) { - transportModule.setTransport(CountingAssertingLocalTransport.class, this.name()); - } - } - - public static final class CountingAssertingLocalTransport extends AssertingLocalTransport { - - @Inject - public CountingAssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) { - super(settings, threadPool, version, namedWriteableRegistry); - } - - @Override - public void sendRequest(final 
DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - SENT_REQUEST_COUNTER.incrementAndGet(); - super.sendRequest(node, requestId, action, request, options); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java index b9282cf05a..a16f318140 100644 --- a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java +++ b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java @@ -19,8 +19,8 @@ package org.elasticsearch.plugins.responseheader; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestModule; public class TestResponseHeaderPlugin extends Plugin { @@ -34,7 +34,7 @@ public class TestResponseHeaderPlugin extends Plugin { return "test-plugin-custom-header-desc"; } - public void onModule(RestModule restModule) { - restModule.addRestAction(TestResponseHeaderRestAction.class); + public void onModule(NetworkModule module) { + module.registerRestHandler(TestResponseHeaderRestAction.class); } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java index 4bcbb8c8ee..d7e13be312 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java @@ -32,49 +32,49 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase { } public void testAllSettingsAreDynamicallyUpdatable() { - innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() { + 
innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.concurrentStreamPool().getMaximumPoolSize()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, randomIntBetween(1, 200), new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.concurrentSmallFileStreamPool().getMaximumPoolSize()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, 0, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), 0, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(null, recoverySettings.rateLimiter()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.retryDelayStateSync().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) 
{ assertEquals(expectedValue, recoverySettings.retryDelayNetwork().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.activityTimeout().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.internalActionTimeout().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis()); diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 57b5e888ea..541911ce4e 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -387,7 +387,7 @@ public class RelocationIT extends ESIntegTestCase { logger.info("--> stopping replica assignment"); 
assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.info("--> wait for all replica shards to be removed, on all nodes"); assertBusy(new Runnable() { diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 4d11146950..60a14abac7 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.recovery.IndexRecoveryIT; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -84,7 +83,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { } } assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); // we use 2 nodes a lucky and unlucky one // the lucky one holds the primary // the unlucky one gets the replica and the truncated leftovers diff --git a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java index fc888c79a8..987aef90bc 100644 --- a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java @@ -54,7 +54,7 @@ public class FileScriptTests extends ESTestCase { 
.put("script.engine." + MockScriptEngine.NAME + ".file.aggs", false).build(); ScriptService scriptService = makeScriptService(settings); Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); - assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders)); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap())); } public void testAllOpsDisabled() throws Exception { @@ -68,7 +68,7 @@ public class FileScriptTests extends ESTestCase { Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); for (ScriptContext context : ScriptContext.Standard.values()) { try { - scriptService.compile(script, context, contextAndHeaders); + scriptService.compile(script, context, contextAndHeaders, Collections.emptyMap()); fail(context.getKey() + " script should have been rejected"); } catch(Exception e) { assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [file], operation [" + context.getKey() + "] and lang [" + MockScriptEngine.NAME + "] are disabled")); diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 02fad31984..47adeabe02 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -62,7 +63,7 @@ public class NativeScriptTests extends ESTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); ExecutableScript executable = scriptService.executable(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), - 
ScriptContext.Standard.SEARCH, contextAndHeaders); + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); assertThat(executable.run().toString(), equalTo("test")); terminate(injector.getInstance(ThreadPool.class)); } @@ -88,7 +89,7 @@ public class NativeScriptTests extends ESTestCase { for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext, - contextAndHeaders), notNullValue()); + contextAndHeaders, Collections.emptyMap()), notNullValue()); } } @@ -110,4 +111,4 @@ public class NativeScriptTests extends ESTestCase { return "test"; } } -}
\ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java index 0edaedbb28..019eb7c74a 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java @@ -58,7 +58,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled")); @@ -71,16 +71,16 @@ public class ScriptContextTests extends ESTestCase { ScriptService scriptService = makeScriptService(); Script script = new Script("1", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, null); try { - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (ScriptException e) { assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled")); } // still works for other script contexts - assertNotNull(scriptService.compile(script, 
ScriptContext.Standard.AGGS, contextAndHeaders)); - assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders)); - assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"), contextAndHeaders)); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.AGGS, contextAndHeaders, Collections.emptyMap())); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap())); + assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"), contextAndHeaders, Collections.emptyMap())); } public void testUnknownPluginScriptContext() throws Exception { @@ -89,7 +89,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("script context [" + PLUGIN_NAME + "_unknown] not supported")); @@ -109,7 +109,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, context, contextAndHeaders); + scriptService.compile(script, context, contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("script context [test] not supported")); diff --git 
a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java index 3e476d2beb..0afd72dab2 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -252,7 +252,7 @@ public class ScriptModesTests extends ESTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return null; } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 23cada02c6..ab325e9e0c 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -33,6 +33,7 @@ import org.junit.Before; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -48,7 +49,7 @@ import static org.hamcrest.Matchers.sameInstance; public class ScriptServiceTests extends ESTestCase { private ResourceWatcherService resourceWatcherService; - private Set<ScriptEngineService> scriptEngineServices; + private ScriptEngineService scriptEngineService; private Map<String, ScriptEngineService> scriptEnginesByLangMap; private ScriptContextRegistry scriptContextRegistry; private ScriptContext[] scriptContexts; @@ -72,8 +73,8 @@ public class ScriptServiceTests extends ESTestCase { .put("path.conf", genericConfigFolder) .build(); resourceWatcherService = new ResourceWatcherService(baseSettings, null); - scriptEngineServices = newHashSet(new TestEngineService()); - scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(scriptEngineServices); + scriptEngineService = new TestEngineService(); + scriptEnginesByLangMap = 
ScriptModesTests.buildScriptEnginesByLangMap(Collections.singleton(scriptEngineService)); //randomly register custom script contexts int randomInt = randomIntBetween(0, 3); //prevent duplicates using map @@ -100,7 +101,7 @@ public class ScriptServiceTests extends ESTestCase { private void buildScriptService(Settings additionalSettings) throws IOException { Settings finalSettings = Settings.builder().put(baseSettings).put(additionalSettings).build(); Environment environment = new Environment(finalSettings); - scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) { + scriptService = new ScriptService(finalSettings, environment, Collections.singleton(scriptEngineService), resourceWatcherService, scriptContextRegistry) { @Override String getScriptFromIndex(String scriptLang, String id, HasContextAndHeaders headersContext) { //mock the script that gets retrieved from an index @@ -131,7 +132,7 @@ public class ScriptServiceTests extends ESTestCase { logger.info("--> verify that file with extension was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, contextAndHeaders); + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); logger.info("--> delete both files"); @@ -142,7 +143,7 @@ public class ScriptServiceTests extends ESTestCase { logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, - contextAndHeaders); + contextAndHeaders, Collections.emptyMap()); fail("the script test_script should no longer exist"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk file script [test_script] using lang 
[test]")); @@ -153,9 +154,9 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -163,9 +164,9 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("script", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -174,9 +175,9 @@ public class ScriptServiceTests extends ESTestCase { buildScriptService(Settings.EMPTY); createFileScripts("test"); CompiledScript compiledScript1 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null), - randomFrom(scriptContexts), 
contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -225,13 +226,11 @@ public class ScriptServiceTests extends ESTestCase { } while (scriptContextSettings.containsKey(scriptContext)); scriptContextSettings.put(scriptContext, randomFrom(ScriptMode.values())); } - int numEngineSettings = randomIntBetween(0, 10); + int numEngineSettings = randomIntBetween(0, ScriptType.values().length * scriptContexts.length); Map<String, ScriptMode> engineSettings = new HashMap<>(); for (int i = 0; i < numEngineSettings; i++) { String settingKey; do { - ScriptEngineService[] scriptEngineServices = this.scriptEngineServices.toArray(new ScriptEngineService[this.scriptEngineServices.size()]); - ScriptEngineService scriptEngineService = randomFrom(scriptEngineServices); ScriptType scriptType = randomFrom(ScriptType.values()); ScriptContext scriptContext = randomFrom(this.scriptContexts); settingKey = scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey(); @@ -288,40 +287,38 @@ public class ScriptServiceTests extends ESTestCase { buildScriptService(builder.build()); createFileScripts("groovy", "expression", "mustache", "test"); - for (ScriptEngineService scriptEngineService : scriptEngineServices) { - for (ScriptType scriptType : ScriptType.values()) { - //make sure file scripts have a different name than inline ones. - //Otherwise they are always considered file ones as they can be found in the static cache. - String script = scriptType == ScriptType.FILE ? "file_script" : "script"; - for (ScriptContext scriptContext : this.scriptContexts) { - //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings - ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." 
+ scriptContext.getKey()); - if (scriptMode == null) { - scriptMode = scriptContextSettings.get(scriptContext.getKey()); - } - if (scriptMode == null) { - scriptMode = scriptSourceSettings.get(scriptType); - } - if (scriptMode == null) { - scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType); - } + for (ScriptType scriptType : ScriptType.values()) { + //make sure file scripts have a different name than inline ones. + //Otherwise they are always considered file ones as they can be found in the static cache. + String script = scriptType == ScriptType.FILE ? "file_script" : "script"; + for (ScriptContext scriptContext : this.scriptContexts) { + //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings + ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey()); + if (scriptMode == null) { + scriptMode = scriptContextSettings.get(scriptContext.getKey()); + } + if (scriptMode == null) { + scriptMode = scriptSourceSettings.get(scriptType); + } + if (scriptMode == null) { + scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType); + } - for (String lang : scriptEngineService.types()) { - switch (scriptMode) { - case ON: + for (String lang : scriptEngineService.types()) { + switch (scriptMode) { + case ON: + assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); + break; + case OFF: + assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); + break; + case SANDBOX: + if (scriptEngineService.sandboxed()) { assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); - break; - case OFF: + } else { assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); - break; - case SANDBOX: - if (scriptEngineService.sandboxed()) { - assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); - } else { - assertCompileRejected(lang, script, scriptType, scriptContext, 
contextAndHeaders); - } - break; - } + } + break; } } } @@ -338,15 +335,13 @@ public class ScriptServiceTests extends ESTestCase { unknownContext = randomAsciiOfLength(randomIntBetween(1, 30)); } while(scriptContextRegistry.isSupportedContext(new ScriptContext.Plugin(pluginName, unknownContext))); - for (ScriptEngineService scriptEngineService : scriptEngineServices) { - for (String type : scriptEngineService.types()) { - try { - scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( - pluginName, unknownContext), contextAndHeaders); - fail("script compilation should have been rejected"); - } catch(IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); - } + for (String type : scriptEngineService.types()) { + try { + scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( + pluginName, unknownContext), contextAndHeaders, Collections.emptyMap()); + fail("script compilation should have been rejected"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); } } } @@ -354,20 +349,20 @@ public class ScriptServiceTests extends ESTestCase { public void testCompileCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testExecutableCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders 
= new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testSearchCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts)); + scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -377,7 +372,7 @@ public class ScriptServiceTests extends ESTestCase { int numberOfCompilations = randomIntBetween(1, 1024); for (int i = 0; i < numberOfCompilations; i++) { scriptService - .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); } assertEquals(numberOfCompilations, scriptService.stats().getCompilations()); } @@ -387,8 +382,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); + 
scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -396,14 +391,14 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); createFileScripts("test"); - scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testIndexedScriptCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -412,8 +407,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); - scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); + scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), 
randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(2L, scriptService.stats().getCompilations()); assertEquals(1L, scriptService.stats().getCacheEvictions()); } @@ -429,7 +424,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) { try { - scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders); + scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()); fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]"); } catch(ScriptException e) { //all good @@ -438,7 +433,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) { - assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders), notNullValue()); + assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()), notNullValue()); } public static class TestEngineService implements ScriptEngineService { @@ -459,7 +454,7 @@ public class ScriptServiceTests extends ESTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return "compiled_" + script; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java index 0a660b8537..aad2c9bb3e 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java +++ 
b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java @@ -99,20 +99,22 @@ public class BooleanTermsIT extends ESIntegTestCase { final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0; assertThat(terms.getBuckets().size(), equalTo(bucketCount)); - Terms.Bucket bucket = terms.getBucketByKey("0"); + Terms.Bucket bucket = terms.getBucketByKey("false"); if (numSingleFalses == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numSingleFalses, bucket.getDocCount()); + assertEquals("false", bucket.getKeyAsString()); } - bucket = terms.getBucketByKey("1"); + bucket = terms.getBucketByKey("true"); if (numSingleTrues == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numSingleTrues, bucket.getDocCount()); + assertEquals("true", bucket.getKeyAsString()); } } @@ -131,20 +133,22 @@ public class BooleanTermsIT extends ESIntegTestCase { final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 
1 : 0; assertThat(terms.getBuckets().size(), equalTo(bucketCount)); - Terms.Bucket bucket = terms.getBucketByKey("0"); + Terms.Bucket bucket = terms.getBucketByKey("false"); if (numMultiFalses == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numMultiFalses, bucket.getDocCount()); + assertEquals("false", bucket.getKeyAsString()); } - bucket = terms.getBucketByKey("1"); + bucket = terms.getBucketByKey("true"); if (numMultiTrues == 0) { assertNull(bucket); } else { assertNotNull(bucket); assertEquals(numMultiTrues, bucket.getDocCount()); + assertEquals("true", bucket.getKeyAsString()); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java index b6611a956a..540420c21b 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java @@ -18,12 +18,15 @@ */ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.children.Children; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.sum.Sum; @@ -392,6 +395,65 @@ public class ChildrenIT extends ESIntegTestCase { assertThat(terms.getBuckets().get(0).getDocCount(), equalTo(1l)); } + public void testPostCollectAllLeafReaders() throws Exception { + // The 
'towns' and 'parent_names' aggs operate on parent docs and if child docs are in different segments we need + // to ensure those segments which child docs are also evaluated to in the post collect phase. + + // Before we only evaluated segments that yielded matches in 'towns' and 'parent_names' aggs, which caused + // us to miss to evaluate child docs in segments we didn't have parent matches for. + + assertAcked( + prepareCreate("index") + .addMapping("parentType", "name", "type=string,index=not_analyzed", "town", "type=string,index=not_analyzed") + .addMapping("childType", "_parent", "type=parentType", "name", "type=string,index=not_analyzed", "age", "type=integer") + ); + List<IndexRequestBuilder> requests = new ArrayList<>(); + requests.add(client().prepareIndex("index", "parentType", "1").setSource("name", "Bob", "town", "Memphis")); + requests.add(client().prepareIndex("index", "parentType", "2").setSource("name", "Alice", "town", "Chicago")); + requests.add(client().prepareIndex("index", "parentType", "3").setSource("name", "Bill", "town", "Chicago")); + requests.add(client().prepareIndex("index", "childType", "1").setSource("name", "Jill", "age", 5).setParent("1")); + requests.add(client().prepareIndex("index", "childType", "2").setSource("name", "Joey", "age", 3).setParent("1")); + requests.add(client().prepareIndex("index", "childType", "3").setSource("name", "John", "age", 2).setParent("2")); + requests.add(client().prepareIndex("index", "childType", "4").setSource("name", "Betty", "age", 6).setParent("3")); + requests.add(client().prepareIndex("index", "childType", "5").setSource("name", "Dan", "age", 1).setParent("3")); + indexRandom(true, requests); + + SearchResponse response = client().prepareSearch("index") + .setSize(0) + .addAggregation(AggregationBuilders.terms("towns").field("town") + .subAggregation(AggregationBuilders.terms("parent_names").field("name") + .subAggregation(AggregationBuilders.children("child_docs").childType("childType")) + ) + 
) + .get(); + + Terms towns = response.getAggregations().get("towns"); + assertThat(towns.getBuckets().size(), equalTo(2)); + assertThat(towns.getBuckets().get(0).getKeyAsString(), equalTo("Chicago")); + assertThat(towns.getBuckets().get(0).getDocCount(), equalTo(2L)); + + Terms parents = towns.getBuckets().get(0).getAggregations().get("parent_names"); + assertThat(parents.getBuckets().size(), equalTo(2)); + assertThat(parents.getBuckets().get(0).getKeyAsString(), equalTo("Alice")); + assertThat(parents.getBuckets().get(0).getDocCount(), equalTo(1L)); + Children children = parents.getBuckets().get(0).getAggregations().get("child_docs"); + assertThat(children.getDocCount(), equalTo(1L)); + + assertThat(parents.getBuckets().get(1).getKeyAsString(), equalTo("Bill")); + assertThat(parents.getBuckets().get(1).getDocCount(), equalTo(1L)); + children = parents.getBuckets().get(1).getAggregations().get("child_docs"); + assertThat(children.getDocCount(), equalTo(2L)); + + assertThat(towns.getBuckets().get(1).getKeyAsString(), equalTo("Memphis")); + assertThat(towns.getBuckets().get(1).getDocCount(), equalTo(1L)); + parents = towns.getBuckets().get(1).getAggregations().get("parent_names"); + assertThat(parents.getBuckets().size(), equalTo(1)); + assertThat(parents.getBuckets().get(0).getKeyAsString(), equalTo("Bob")); + assertThat(parents.getBuckets().get(0).getDocCount(), equalTo(1L)); + children = parents.getBuckets().get(0).getAggregations().get("child_docs"); + assertThat(children.getDocCount(), equalTo(2L)); + } + private static final class Control { final String category; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 9a1d498ad6..97cd659a1d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java 
@@ -1429,7 +1429,7 @@ public class DateHistogramIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -1555,7 +1555,7 @@ public class DateHistogramIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java index a1f4b20dc1..349b61fc37 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java @@ -423,10 +423,10 @@ public class NestedIT extends ESIntegTestCase { Terms startDate = response.getAggregations().get("startDate"); assertThat(startDate.getBuckets().size(), equalTo(2)); - Terms.Bucket bucket = startDate.getBucketByKey("1414800000000"); // 2014-11-01T00:00:00.000Z + Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); Terms endDate = bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("1417305600000"); // 2014-11-30T00:00:00.000Z + bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); Terms period = bucket.getAggregations().get("period"); bucket = period.getBucketByKey("2014-11"); @@ -440,10 +440,10 @@ public class NestedIT extends ESIntegTestCase { Terms tags = nestedTags.getAggregations().get("tag"); assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty - bucket = startDate.getBucketByKey("1417392000000"); // 2014-12-01T00:00:00.000Z + bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); endDate = 
bucket.getAggregations().get("endDate"); - bucket = endDate.getBucketByKey("1419984000000"); // 2014-12-31T00:00:00.000Z + bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z"); assertThat(bucket.getDocCount(), equalTo(1l)); period = bucket.getAggregations().get("period"); bucket = period.getBucketByKey("2014-12"); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java index ac146706eb..db02d6ccb0 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java @@ -364,7 +364,7 @@ public class AvgIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -500,7 +500,7 @@ public class AvgIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -585,4 +585,4 @@ public class AvgIT extends AbstractNumericTestCase { public void scriptRemoved(CompiledScript script) { } } -}
\ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java index d87de00010..2c27bde57d 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java @@ -359,7 +359,7 @@ public class SumIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -497,7 +497,7 @@ public class SumIT extends AbstractNumericTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -583,4 +583,4 @@ public class SumIT extends AbstractNumericTestCase { public void scriptRemoved(CompiledScript script) { } } -}
\ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java index fde7256ad0..c461154649 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java @@ -244,7 +244,7 @@ public class ValueCountIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -330,4 +330,4 @@ public class ValueCountIT extends ESIntegTestCase { public void scriptRemoved(CompiledScript script) { } } -}
\ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 90d4437fce..207b626409 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -57,6 +57,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.min; import static org.elasticsearch.search.aggregations.AggregationBuilders.range; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg; +import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java index 69c4bbdbd1..1f42129237 100644 --- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java +++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java @@ -61,7 +61,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase { final int numShards = between(1, 20); client().admin().indices().prepareCreate("test") .setSettings(settingsBuilder().put("index.number_of_shards", numShards).put("index.number_of_replicas", numberOfReplicas)) - .addMapping("type1", "loc", "type=geo_point", "test", "type=string").execute().actionGet(); + .addMapping("type", "loc", "type=geo_point", "test", "type=string").execute().actionGet(); ensureGreen(); List<IndexRequestBuilder> indexBuilders = new 
ArrayList<>(); final int numDocs = between(10, 20); diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 891ebf7e2d..b80810fc6d 100644 --- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -34,7 +34,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.AbstractQueryTestCase; import org.elasticsearch.index.query.EmptyQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -47,7 +51,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder; import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder.InnerHit; import org.elasticsearch.search.fetch.source.FetchSourceContext; -import org.elasticsearch.search.highlight.HighlightBuilder; +import org.elasticsearch.search.highlight.HighlightBuilderTests; import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -251,8 +255,7 @@ public class SearchSourceBuilderTests extends ESTestCase { } } if (randomBoolean()) { - // NORELEASE need a random highlight builder method - builder.highlighter(new HighlightBuilder().field(randomAsciiOfLengthBetween(5, 20))); + 
builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder()); } if (randomBoolean()) { // NORELEASE need a random suggest builder method diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java index 4be2b36fbe..63c142f1e7 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java @@ -1176,7 +1176,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase { .endObject().endObject()).get(); fail(); } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]")); + assertThat(e.toString(), containsString("The _parent field's type option can't be changed: [null]->[parent]")); } } diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index 847e03e5c4..c0cc17fc43 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.geo; import com.spatial4j.core.shape.Rectangle; + import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.geo.ShapeRelation; @@ -47,7 +48,10 @@ import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomPoint; import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomRectangle; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.containsString; +import static 
org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.nullValue; public class GeoShapeQueryTests extends ESSingleNodeTestCase { public void testNullShape() throws Exception { @@ -396,6 +400,12 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase { .setPostFilter(filter).get(); assertSearchResponse(result); assertHitCount(result, 1); + // no shape + filter = QueryBuilders.geoShapeQuery("location", ShapeBuilders.newGeometryCollection()); + result = client().prepareSearch("test").setTypes("type").setQuery(QueryBuilders.matchAllQuery()) + .setPostFilter(filter).get(); + assertSearchResponse(result); + assertHitCount(result, 0); } public void testPointsOnly() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java index 5a8d7c0150..05b999a919 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.highlight; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; @@ -52,12 +51,12 @@ public class CustomHighlighter implements Highlighter { } List<Text> responses = new ArrayList<>(); - responses.add(new StringText(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(), + responses.add(new Text(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(), cacheEntry.position))); if (field.fieldOptions().options() != null) { for (Map.Entry<String, Object> entry : field.fieldOptions().options().entrySet()) { - responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue())); + responses.add(new Text("field:" + entry.getKey() + ":" + 
entry.getValue())); } } diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java index 75fc9f9815..2ac5895c9e 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.search.highlight; -import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.ParsingException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -52,8 +52,8 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermQueryParser; import org.elasticsearch.indices.query.IndicesQueriesRegistry; -import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.highlight.HighlightBuilder.Field; +import org.elasticsearch.search.highlight.HighlightBuilder.Order; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -159,7 +159,8 @@ public class HighlightBuilderTests extends ESTestCase { XContentParser parser = XContentHelper.createParser(builder.bytes()); context.reset(parser); - HighlightBuilder secondHighlightBuilder = HighlightBuilder.fromXContent(context); + parser.nextToken(); + HighlightBuilder secondHighlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertNotSame(highlightBuilder, 
secondHighlightBuilder); assertEquals(highlightBuilder, secondHighlightBuilder); assertEquals(highlightBuilder.hashCode(), secondHighlightBuilder.hashCode()); @@ -179,7 +180,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse array with name [bad_fieldname]", e.getMessage()); @@ -196,7 +197,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse array with name [bad_fieldname]", e.getMessage()); @@ -216,7 +217,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("unexpected fieldname [bad_fieldname]", e.getMessage()); @@ -233,7 +234,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("unexpected fieldname [bad_fieldname]", e.getMessage()); @@ -253,7 +254,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse object with name [bad_fieldname]", e.getMessage()); @@ -270,7 +271,7 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - 
HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("expected a parsing exception"); } catch (ParsingException e) { assertEquals("cannot parse object with name [bad_fieldname]", e.getMessage()); @@ -354,7 +355,7 @@ public class HighlightBuilderTests extends ESTestCase { XContentParser parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); context.reset(parser); - HighlightBuilder highlightBuilder = HighlightBuilder.fromXContent(context); + HighlightBuilder highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertArrayEquals("setting tags_schema 'styled' should alter pre_tags", HighlightBuilder.DEFAULT_STYLED_PRE_TAG, highlightBuilder.preTags()); assertArrayEquals("setting tags_schema 'styled' should alter post_tags", HighlightBuilder.DEFAULT_STYLED_POST_TAGS, @@ -366,7 +367,7 @@ public class HighlightBuilderTests extends ESTestCase { parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); context.reset(parser); - highlightBuilder = HighlightBuilder.fromXContent(context); + highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); assertArrayEquals("setting tags_schema 'default' should alter pre_tags", HighlightBuilder.DEFAULT_PRE_TAGS, highlightBuilder.preTags()); assertArrayEquals("setting tags_schema 'default' should alter post_tags", HighlightBuilder.DEFAULT_POST_TAGS, @@ -379,13 +380,66 @@ public class HighlightBuilderTests extends ESTestCase { context.reset(parser); try { - highlightBuilder = HighlightBuilder.fromXContent(context); + HighlightBuilder.PROTOTYPE.fromXContent(context); fail("setting unknown tag schema should throw exception"); } catch (IllegalArgumentException e) { assertEquals("Unknown tag schema [somthing_else]", e.getMessage()); } } + /** + * test parsing empty highlight or empty fields blocks + */ + public void testParsingEmptyStructure() throws IOException { + QueryParseContext context = new 
QueryParseContext(indicesQueriesRegistry); + context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY)); + String highlightElement = "{ }"; + XContentParser parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); + + context.reset(parser); + HighlightBuilder highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); + assertEquals("expected plain HighlightBuilder", new HighlightBuilder(), highlightBuilder); + + highlightElement = "{ \"fields\" : { } }"; + parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); + + context.reset(parser); + highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); + assertEquals("defining no field should return plain HighlightBuilder", new HighlightBuilder(), highlightBuilder); + + highlightElement = "{ \"fields\" : { \"foo\" : { } } }"; + parser = XContentFactory.xContent(highlightElement).createParser(highlightElement); + + context.reset(parser); + highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); + assertEquals("expected HighlightBuilder with field", new HighlightBuilder().field(new Field("foo")), highlightBuilder); + System.out.println(Math.log(1/(double)(1+1)) + 1.0); + } + + /** + * test ordinals of {@link Order}, since serialization depends on it + */ + public void testValidOrderOrdinals() { + assertThat(Order.NONE.ordinal(), equalTo(0)); + assertThat(Order.SCORE.ordinal(), equalTo(1)); + } + + public void testOrderSerialization() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + Order.NONE.writeTo(out); + try (StreamInput in = StreamInput.wrap(out.bytes())) { + assertThat(in.readVInt(), equalTo(0)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + Order.SCORE.writeTo(out); + try (StreamInput in = StreamInput.wrap(out.bytes())) { + assertThat(in.readVInt(), equalTo(1)); + } + } + } + protected static XContentBuilder toXContent(HighlightBuilder highlight, XContentType 
contentType) throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(contentType); if (randomBoolean()) { @@ -396,9 +450,9 @@ public class HighlightBuilderTests extends ESTestCase { } /** - * create random shape that is put under test + * create random highlight builder that is put under test */ - private static HighlightBuilder randomHighlighterBuilder() { + public static HighlightBuilder randomHighlighterBuilder() { HighlightBuilder testHighlighter = new HighlightBuilder(); setRandomCommonOptions(testHighlighter); testHighlighter.useExplicitFieldOrder(randomBoolean()); @@ -457,7 +511,12 @@ public class HighlightBuilderTests extends ESTestCase { highlightBuilder.highlightQuery(highlightQuery); } if (randomBoolean()) { - highlightBuilder.order(randomAsciiOfLengthBetween(1, 10)); + if (randomBoolean()) { + highlightBuilder.order(randomFrom(Order.values())); + } else { + // also test the string setter + highlightBuilder.order(randomFrom(Order.values()).toString()); + } } if (randomBoolean()) { highlightBuilder.highlightFilter(randomBoolean()); @@ -526,7 +585,11 @@ public class HighlightBuilderTests extends ESTestCase { highlightBuilder.highlightQuery(new TermQueryBuilder(randomAsciiOfLengthBetween(11, 20), randomAsciiOfLengthBetween(11, 20))); break; case 8: - highlightBuilder.order(randomAsciiOfLengthBetween(11, 20)); + if (highlightBuilder.order() == Order.NONE) { + highlightBuilder.order(Order.SCORE); + } else { + highlightBuilder.order(Order.NONE); + } break; case 9: highlightBuilder.highlightFilter(toggleOrSet(highlightBuilder.highlightFilter())); diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index 8a2506ec3a..4063ec81a2 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -58,7 +58,6 @@ 
import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhrasePrefixQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -67,6 +66,7 @@ import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.typeQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; +import static org.elasticsearch.index.query.QueryBuilders.existsQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -1557,7 +1557,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .fragmenter("simple"))).get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight")); - assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the <em>tag</em> token near the end")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end")); response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE)) @@ -1566,7 +1566,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .fragmenter("span"))).get(); 
assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight")); - assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the <em>tag</em> token near the end")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end")); assertFailures(client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE)) @@ -2062,7 +2062,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy <xxx>quick</xxx> dog")); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy quick dog")); } public void testPostingsHighlighterMultipleFields() throws Exception { @@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { logger.info("--> highlighting and searching on field1"); SearchSourceBuilder source = searchSource().query(boolQuery() - .should(constantScoreQuery(QueryBuilders.missingQuery("field1"))) + .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1"))) .should(matchQuery("field1", "test")) .should(constantScoreQuery(queryStringQuery("field1:photo*")))) .highlighter(highlight().field("field1")); @@ -2501,7 +2501,9 @@ public class HighlighterSearchIT extends ESIntegTestCase { refresh(); logger.info("--> highlighting and searching on field1"); - SearchSourceBuilder source = searchSource().query(boolQuery().must(queryStringQuery("field1:photo*")).filter(missingQuery("field_null"))) + SearchSourceBuilder source = searchSource().query(boolQuery() + .must(queryStringQuery("field1:photo*")) + 
.mustNot(existsQuery("field_null"))) .highlighter(highlight().field("field1")); SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get(); assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted")); diff --git a/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java b/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java new file mode 100644 index 0000000000..5156209d6f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.highlight; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.highlight.QueryScorer; +import org.apache.lucene.util.LuceneTestCase; + +public class PlainHighlighterTests extends LuceneTestCase { + + public void testHighlightPhrase() throws Exception { + Query query = new PhraseQuery.Builder() + .add(new Term("field", "foo")) + .add(new Term("field", "bar")) + .build(); + QueryScorer queryScorer = new CustomQueryScorer(query); + org.apache.lucene.search.highlight.Highlighter highlighter = new org.apache.lucene.search.highlight.Highlighter(queryScorer); + String[] frags = highlighter.getBestFragments(new MockAnalyzer(random()), "field", "bar foo bar foo", 10); + assertArrayEquals(new String[] {"bar <B>foo</B> <B>bar</B> foo"}, frags); + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java index cc631d5df2..0525fd28db 100644 --- a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java +++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.internal; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -39,25 +39,25 @@ public class InternalSearchHitTests extends ESTestCase { SearchShardTarget target = new SearchShardTarget("_node_id", "_index", 0); Map<String, InternalSearchHits> innerHits = new HashMap<>(); - InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new 
StringText("_type"), null); + InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHit1.shardTarget(target); - InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerInnerHit2.shardTarget(target); innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerInnerHit2}, 1, 1f)); innerHit1.setInnerHits(innerHits); - InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHit2.shardTarget(target); - InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHit3.shardTarget(target); innerHits = new HashMap<>(); - InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new Text("_type"), null); innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerHit1, innerHit2}, 1, 1f)); innerHits.put("2", new InternalSearchHits(new InternalSearchHit[]{innerHit3}, 1, 1f)); hit1.shardTarget(target); hit1.setInnerHits(innerHits); - InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null); + InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new Text("_type"), null); hit2.shardTarget(target); InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[]{hit1, hit2}, 2, 1f); diff --git a/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java b/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java new file mode 100644 index 0000000000..83f6efaa15 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java @@ -0,0 +1,173 @@ 
+/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.RandomApproximationQuery; +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TotalHitCountCollector; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.TestUtil; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static 
org.hamcrest.Matchers.greaterThan; + +public class ProfileTests extends ESTestCase { + + static Directory dir; + static IndexReader reader; + static ContextIndexSearcher searcher; + + @BeforeClass + public static void before() throws IOException { + dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + final int numDocs = TestUtil.nextInt(random(), 1, 20); + for (int i = 0; i < numDocs; ++i) { + final int numHoles = random().nextInt(5); + for (int j = 0; j < numHoles; ++j) { + w.addDocument(new Document()); + } + Document doc = new Document(); + doc.add(new StringField("foo", "bar", Store.NO)); + w.addDocument(doc); + } + reader = w.getReader(); + w.close(); + Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader)); + searcher = new ContextIndexSearcher(engineSearcher, IndexSearcher.getDefaultQueryCache(), MAYBE_CACHE_POLICY); + } + + @AfterClass + public static void after() throws IOException { + IOUtils.close(reader, dir); + dir = null; + reader = null; + searcher = null; + } + + public void testBasic() throws IOException { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.search(query, 1); + List<ProfileResult> results = profiler.getQueryTree(); + assertEquals(1, results.size()); + Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), greaterThan(0L)); + 
assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testNoScoring() throws IOException { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed + List<ProfileResult> results = profiler.getQueryTree(); + assertEquals(1, results.size()); + Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testUseIndexStats() throws IOException { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + Query query = new TermQuery(new Term("foo", "bar")); + searcher.count(query); // will use index stats + List<ProfileResult> results = profiler.getQueryTree(); + assertEquals(0, results.size()); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + } + + public void testApproximations() throws IOException { + Profiler profiler = new Profiler(); + Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader)); + // disable query caching since we want to test 
approximations, which won't + // be exposed on a cached entry + ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, null, MAYBE_CACHE_POLICY); + searcher.setProfiler(profiler); + Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random()); + searcher.count(query); + List<ProfileResult> results = profiler.getQueryTree(); + assertEquals(1, results.size()); + Map<String, Long> breakdown = results.get(0).getTimeBreakdown(); + assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L)); + assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), greaterThan(0L)); + + long rewriteTime = profiler.getRewriteTime(); + assertThat(rewriteTime, greaterThan(0L)); + + } + + public void testCollector() throws IOException { + TotalHitCountCollector collector = new TotalHitCountCollector(); + ProfileCollector profileCollector = new ProfileCollector(collector); + assertEquals(0, profileCollector.getTime()); + final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0)); + assertThat(profileCollector.getTime(), greaterThan(0L)); + long time = profileCollector.getTime(); + leafCollector.setScorer(Lucene.illegalScorer("dummy scorer")); + assertThat(profileCollector.getTime(), greaterThan(time)); + time = profileCollector.getTime(); + leafCollector.collect(0); + assertThat(profileCollector.getTime(), greaterThan(time)); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java 
b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java new file mode 100644 index 0000000000..bb33364a75 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java @@ -0,0 +1,596 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.action.search.*; +import org.elasticsearch.search.SearchHit; +import org.apache.lucene.util.English; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.index.query.*; +import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.profile.RandomQueryGenerator.randomQueryBuilder; +import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual; +import static org.hamcrest.Matchers.*; + + +public class QueryProfilerIT extends ESIntegTestCase { + + /** + * This test simply checks to make sure nothing crashes. 
Test indexes 100-150 documents, + * constructs 20-100 random queries and tries to profile them + */ + public void testProfileQuery() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + List<String> stringFields = Arrays.asList("field1"); + List<String> numericFields = Arrays.asList("field2"); + + indexRandom(true, docs); + + refresh(); + int iters = between(20, 100); + for (int i = 0; i < iters; i++) { + QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + for (Map.Entry<String, List<ProfileShardResult>> shard : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shard.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + + } + } + + /** + * This test generates 1-10 random queries and executes a profiled and non-profiled + * search for each query. 
It then does some basic sanity checking of score and hits + * to make sure the profiling doesn't interfere with the hits being returned + */ + public void testProfileMatchesRegular() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + List<String> stringFields = Arrays.asList("field1"); + List<String> numericFields = Arrays.asList("field2"); + + indexRandom(true, docs); + + refresh(); + int iters = between(1, 10); + for (int i = 0; i < iters; i++) { + QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3); + logger.info(q.toString()); + + + SearchRequestBuilder vanilla = client().prepareSearch("test") + .setQuery(q) + .setProfile(false) + .addSort("_score", SortOrder.DESC) + .addSort("_uid", SortOrder.ASC) + .setPreference("_primary") + .setSearchType(SearchType.QUERY_THEN_FETCH); + + SearchRequestBuilder profile = client().prepareSearch("test") + .setQuery(q) + .setProfile(true) + .addSort("_score", SortOrder.DESC) + .addSort("_uid", SortOrder.ASC) + .setPreference("_primary") + .setSearchType(SearchType.QUERY_THEN_FETCH); + + MultiSearchResponse.Item[] responses = client().prepareMultiSearch() + .add(vanilla) + .add(profile) + .execute().actionGet().getResponses(); + + SearchResponse vanillaResponse = responses[0].getResponse(); + SearchResponse profileResponse = responses[1].getResponse(); + + float vanillaMaxScore = vanillaResponse.getHits().getMaxScore(); + float profileMaxScore = profileResponse.getHits().getMaxScore(); + if (Float.isNaN(vanillaMaxScore)) { + assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]", + Float.isNaN(profileMaxScore)); + } else { + assertTrue("Profile maxScore of [" + profileMaxScore + "] 
is not close to Vanilla maxScore [" + vanillaMaxScore + "]", + nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001)); + } + + assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + vanillaResponse.getHits().totalHits() + "]", + vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits())); + + SearchHit[] vanillaHits = vanillaResponse.getHits().getHits(); + SearchHit[] profileHits = profileResponse.getHits().getHits(); + + for (int j = 0; j < vanillaHits.length; j++) { + assertThat("Profile hit #" + j + " has a different ID from Vanilla", + vanillaHits[j].getId(), equalTo(profileHits[j].getId())); + } + + } + } + + /** + * This test verifies that the output is reasonable for a simple, non-nested query + */ + public void testSimpleMatch() throws Exception { + createIndex("test"); + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + ensureGreen(); + + QueryBuilder q = QueryBuilders.matchQuery("field1", "one"); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + Map<String, List<ProfileShardResult>> p = resp.getProfileResults(); + assertNotNull(p); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "TermQuery"); + assertEquals(result.getLuceneDescription(), "field1:one"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + 
CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + /** + * This test verifies that the output is reasonable for a nested query + */ + public void testBool() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")).must(QueryBuilders.matchQuery("field1", "two")); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + Map<String, List<ProfileShardResult>> p = resp.getProfileResults(); + assertNotNull(p); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertEquals(result.getQueryName(), "BooleanQuery"); + assertEquals(result.getLuceneDescription(), "+field1:one +field1:two"); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + assertEquals(result.getProfiledChildren().size(), 2); + + // Check the children + List<ProfileResult> children = result.getProfiledChildren(); + assertEquals(children.size(), 2); + + ProfileResult childProfile = children.get(0); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:one"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + 
assertEquals(childProfile.getProfiledChildren().size(), 0); + + childProfile = children.get(1); + assertEquals(childProfile.getQueryName(), "TermQuery"); + assertEquals(childProfile.getLuceneDescription(), "field1:two"); + assertThat(childProfile.getTime(), greaterThan(0L)); + assertNotNull(childProfile.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + + + } + + /** + * Tests a boolean query with no children clauses + */ + public void testEmptyBool() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boolQuery(); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + /** + * Tests a series of three nested boolean 
queries with a single "leaf" match query. + * The rewrite process will "collapse" this down to a single bool, so this tests to make sure + * nothing catastrophic happens during that fairly substantial rewrite + */ + public void testCollapsingBool() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")))); + + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testBoosting() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", 
String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two")) + .boost(randomFloat()) + .negativeBoost(randomFloat()); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testDisMaxRange() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.disMaxQuery() + .boost(0.33703882f) + .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true)); + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + 
.execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testRange() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5); + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + 
assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + public void testPhrase() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i) + " " + English.intToEnglish(i+1), + "field2", i + ); + } + + indexRandom(true, docs); + + refresh(); + + QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two"); + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch() + .setQuery(q) + .setIndices("test") + .setTypes("type1") + .setProfile(true) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .execute().actionGet(); + + if (resp.getShardFailures().length > 0) { + for (ShardSearchFailure f : resp.getShardFailures()) { + logger.error(f.toString()); + } + fail(); + } + + assertNotNull("Profile response element should not be null", resp.getProfileResults()); + + for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) { + for (ProfileShardResult searchProfiles : shardResult.getValue()) { + for (ProfileResult result : searchProfiles.getQueryResults()) { + assertNotNull(result.getQueryName()); + assertNotNull(result.getLuceneDescription()); + assertThat(result.getTime(), greaterThan(0L)); + assertNotNull(result.getTimeBreakdown()); + } + + CollectorResult result = searchProfiles.getCollectorResult(); + assertThat(result.getName(), not(isEmptyOrNullString())); + assertThat(result.getTime(), greaterThan(0L)); + } + } + } + + /** + * This test makes sure no profile results are returned when profiling is disabled + */ + public void testNoProfile() throws Exception { + createIndex("test"); + ensureGreen(); + + int numDocs = randomIntBetween(100, 150); + IndexRequestBuilder[] docs = 
new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource( + "field1", English.intToEnglish(i), + "field2", i + ); + } + + indexRandom(true, docs); + refresh(); + QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5); + + logger.info(q.toString()); + + SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet(); + assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0)); + } + +} + diff --git a/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java b/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java new file mode 100644 index 0000000000..fb8cd40ce5 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java @@ -0,0 +1,266 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.util.English; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.query.*; + +import java.util.ArrayList; +import java.util.List; + +import static com.carrotsearch.randomizedtesting.RandomizedTest.*; +import static org.junit.Assert.assertTrue; + + +public class RandomQueryGenerator { + public static QueryBuilder randomQueryBuilder(List<String> stringFields, List<String> numericFields, int numDocs, int depth) { + assertTrue("Must supply at least one string field", stringFields.size() > 0); + assertTrue("Must supply at least one numeric field", numericFields.size() > 0); + + // If depth is exhausted, or 50% of the time return a terminal + // Helps limit ridiculously large compound queries + if (depth == 0 || randomBoolean()) { + return randomTerminalQuery(stringFields, numericFields, numDocs); + } + + switch (randomIntBetween(0,5)) { + case 0: + return randomTerminalQuery(stringFields, numericFields, numDocs); + case 1: + return QueryBuilders.boolQuery().must(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1)) + .filter(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1)); + case 2: + return randomBoolQuery(stringFields, numericFields, numDocs, depth); + case 3: + // disabled for now because of https://issues.apache.org/jira/browse/LUCENE-6781 + //return randomBoostingQuery(stringFields, numericFields, numDocs, depth); + case 4: + return randomConstantScoreQuery(stringFields, numericFields, numDocs, depth); + case 5: + return randomDisMaxQuery(stringFields, numericFields, numDocs, depth); + default: + return randomTerminalQuery(stringFields, numericFields, numDocs); + } + } + + private static QueryBuilder randomTerminalQuery(List<String> stringFields, List<String> numericFields, int numDocs) { + switch (randomIntBetween(0,6)) { + case 0: + return randomTermQuery(stringFields, numDocs); + case 1: + return 
randomTermsQuery(stringFields, numDocs); + case 2: + return randomRangeQuery(numericFields, numDocs); + case 3: + return QueryBuilders.matchAllQuery(); + case 4: + return randomCommonTermsQuery(stringFields, numDocs); + case 5: + return randomFuzzyQuery(stringFields); + case 6: + return randomIDsQuery(); + default: + return randomTermQuery(stringFields, numDocs); + } + } + + private static String randomQueryString(int max) { + StringBuilder qsBuilder = new StringBuilder(); + + for (int i = 0; i < max; i++) { + qsBuilder.append(English.intToEnglish(randomInt(max))); + qsBuilder.append(" "); + } + + return qsBuilder.toString().trim(); + } + + private static String randomField(List<String> fields) { + return fields.get(randomInt(fields.size() - 1)); + } + + + + private static QueryBuilder randomTermQuery(List<String> fields, int numDocs) { + return QueryBuilders.termQuery(randomField(fields), randomQueryString(1)); + } + + private static QueryBuilder randomTermsQuery(List<String> fields, int numDocs) { + int numTerms = randomInt(numDocs); + ArrayList<String> terms = new ArrayList<>(numTerms); + + for (int i = 0; i < numTerms; i++) { + terms.add(randomQueryString(1)); + } + + return QueryBuilders.termsQuery(randomField(fields), terms); + } + + private static QueryBuilder randomRangeQuery(List<String> fields, int numDocs) { + QueryBuilder q = QueryBuilders.rangeQuery(randomField(fields)); + + if (randomBoolean()) { + ((RangeQueryBuilder)q).from(randomIntBetween(0, numDocs / 2 - 1)); + } + if (randomBoolean()) { + ((RangeQueryBuilder)q).to(randomIntBetween(numDocs / 2, numDocs)); + } + + return q; + } + + private static QueryBuilder randomBoolQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) { + QueryBuilder q = QueryBuilders.boolQuery(); + int numClause = randomIntBetween(0,5); + for (int i = 0; i < numClause; i++) { + ((BoolQueryBuilder)q).must(randomQueryBuilder(stringFields, numericFields,numDocs, depth -1)); + } + + numClause = 
randomIntBetween(0,5); + for (int i = 0; i < numClause; i++) { + ((BoolQueryBuilder)q).should(randomQueryBuilder(stringFields, numericFields,numDocs, depth -1)); + } + + numClause = randomIntBetween(0,5); + for (int i = 0; i < numClause; i++) { + ((BoolQueryBuilder)q).mustNot(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1)); + } + + return q; + } + + private static QueryBuilder randomBoostingQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) { + return QueryBuilders.boostingQuery( + randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1), + randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)) + .boost(randomFloat()) + .negativeBoost(randomFloat()); + } + + private static QueryBuilder randomConstantScoreQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) { + return QueryBuilders.constantScoreQuery(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)); + } + + private static QueryBuilder randomCommonTermsQuery(List<String> fields, int numDocs) { + int numTerms = randomInt(numDocs); + + QueryBuilder q = QueryBuilders.commonTermsQuery(randomField(fields), randomQueryString(numTerms)); + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).boost(randomFloat()); + } + + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).cutoffFrequency(randomFloat()); + } + + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).highFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms))) + .highFreqOperator(randomBoolean() ? Operator.AND : Operator.OR); + } + + if (randomBoolean()) { + ((CommonTermsQueryBuilder)q).lowFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms))) + .lowFreqOperator(randomBoolean() ? 
Operator.AND : Operator.OR); + } + + return q; + } + + private static QueryBuilder randomFuzzyQuery(List<String> fields) { + + QueryBuilder q = QueryBuilders.fuzzyQuery(randomField(fields), randomQueryString(1)); + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).boost(randomFloat()); + } + + if (randomBoolean()) { + switch (randomIntBetween(0, 4)) { + case 0: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO); + break; + case 1: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ONE); + break; + case 2: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.TWO); + break; + case 3: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ZERO); + break; + case 4: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.fromEdits(randomIntBetween(0,2))); + break; + default: + ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO); + break; + } + } + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).maxExpansions(Math.abs(randomInt())); + } + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).prefixLength(Math.abs(randomInt())); + } + + if (randomBoolean()) { + ((FuzzyQueryBuilder)q).transpositions(randomBoolean()); + } + + return q; + } + + private static QueryBuilder randomDisMaxQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) { + QueryBuilder q = QueryBuilders.disMaxQuery(); + + int numClauses = randomIntBetween(1, 10); + for (int i = 0; i < numClauses; i++) { + ((DisMaxQueryBuilder)q).add(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1)); + } + + if (randomBoolean()) { + ((DisMaxQueryBuilder)q).boost(randomFloat()); + } + + if (randomBoolean()) { + ((DisMaxQueryBuilder)q).tieBreaker(randomFloat()); + } + + return q; + } + + private static QueryBuilder randomIDsQuery() { + QueryBuilder q = QueryBuilders.idsQuery(); + + int numIDs = randomInt(100); + for (int i = 0; i < numIDs; i++) { + ((IdsQueryBuilder)q).addIds(String.valueOf(randomInt())); + } + + if (randomBoolean()) { + ((IdsQueryBuilder)q).boost(randomFloat()); + } + + return q; + } +}
\ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java index 349197d5f4..73906b2ed8 100644 --- a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java @@ -43,7 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -public class ExistsMissingIT extends ESIntegTestCase { +public class ExistsIT extends ESIntegTestCase { // TODO: move this to a unit test somewhere... public void testEmptyIndex() throws Exception { @@ -51,11 +51,11 @@ public class ExistsMissingIT extends ESIntegTestCase { ensureYellow("test"); SearchResponse resp = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("foo")).execute().actionGet(); assertSearchResponse(resp); - resp = client().prepareSearch("test").setQuery(QueryBuilders.missingQuery("foo")).execute().actionGet(); + resp = client().prepareSearch("test").setQuery(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("foo"))).execute().actionGet(); assertSearchResponse(resp); } - public void testExistsMissing() throws Exception { + public void testExists() throws Exception { XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent) .startObject() .startObject("type") @@ -145,62 +145,6 @@ public class ExistsMissingIT extends ESIntegTestCase { } throw e; } - - // missing - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery(fieldName)).execute().actionGet(); - assertSearchResponse(resp); - assertEquals(String.format(Locale.ROOT, "missing(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), numDocs - count, resp.getHits().totalHits()); - } - 
} - - public void testNullValueUnset() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed")); - indexRandom(true, - client().prepareIndex("idx", "type", "1").setSource("f", "foo"), - client().prepareIndex("idx", "type", "2").setSource("f", null), - client().prepareIndex("idx", "type", "3").setSource("g", "bar"), - client().prepareIndex("idx", "type", "4").setSource("f", "bar")); - - SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get(); - assertSearchHits(resp, "2", "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get(); - assertSearchHits(resp, "2", "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get(); - assertSearchHits(resp); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get(); - fail("both existence and null_value can't be false"); - } catch (IllegalArgumentException e) { - // expected } } - - public void testNullValueSet() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed,null_value=bar")); - indexRandom(true, - client().prepareIndex("idx", "type", "1").setSource("f", "foo"), - client().prepareIndex("idx", "type", "2").setSource("f", null), - client().prepareIndex("idx", "type", "3").setSource("g", "bar"), - client().prepareIndex("idx", "type", "4").setSource("f", "bar")); - - SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get(); - assertSearchHits(resp, "2", "3", "4"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get(); - assertSearchHits(resp, "3"); - - resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get(); - 
assertSearchHits(resp, "2", "4"); - - try { - client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get(); - fail("both existence and null_value can't be false"); - } catch (IllegalArgumentException e) { - // expected - } - } - } diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 61a237c1a9..9918d44965 100644 --- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -70,7 +70,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery; import static org.elasticsearch.index.query.QueryBuilders.indicesQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; @@ -805,32 +804,6 @@ public class SearchQueryIT extends ESIntegTestCase { searchResponse = client().prepareSearch().setQuery(existsQuery("obj1")).get(); assertHitCount(searchResponse, 2l); assertSearchHits(searchResponse, "1", "2"); - - searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - searchResponse = 
client().prepareSearch().setQuery(queryStringQuery("_missing_:field1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - // wildcard check - searchResponse = client().prepareSearch().setQuery(missingQuery("x*")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); - - // object check - searchResponse = client().prepareSearch().setQuery(missingQuery("obj1")).get(); - assertHitCount(searchResponse, 2l); - assertSearchHits(searchResponse, "3", "4"); } public void testPassQueryOrFilterAsJSONString() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 6f00e9977c..0f5ac1a522 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -942,13 +942,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase { } public void assertSuggestions(String suggestion, String... suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10); assertSuggestions(suggestionName, suggestionBuilder, suggestions); } public void assertSuggestionsNotInOrder(String suggestString, String... 
suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java index 281cf6ae18..419316b526 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import java.io.IOException; import java.util.Locale; @@ -42,11 +42,11 @@ public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestions Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> response = new Suggest.Suggestion<>(name, suggestion.getSize()); String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12"); - Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<>(new StringText(firstSuggestion), 0, text.length() + 2); + Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<>(new Text(firstSuggestion), 0, text.length() + 2); response.addTerm(resultEntry12); String secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123"); - Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new 
Suggest.Suggestion.Entry<>(new StringText(secondSuggestion), 0, text.length() + 3); + Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new Suggest.Suggestion.Entry<>(new Text(secondSuggestion), 0, text.length() + 3); response.addTerm(resultEntry123); return response; diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 51ae038ca0..8fde9bbf33 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -56,7 +56,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // Rebalancing is causing some checks after restore to randomly fail // due to https://github.com/elastic/elasticsearch/issues/9421 - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) .build(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index f9392836d8..7946116f57 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import 
org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -33,24 +32,17 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.store.IndexStore; @@ -68,9 +60,9 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.TestCustomMetaData; import org.elasticsearch.test.rest.FakeRestRequest; -import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; @@ -123,14 +115,14 @@ public 
class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> set test persistent setting"); client.admin().cluster().prepareUpdateSettings().setPersistentSettings( Settings.settingsBuilder() - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) - .put(IndicesTTLService.INDICES_TTL_INTERVAL, random, TimeUnit.MINUTES)) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), random, TimeUnit.MINUTES)) .execute().actionGet(); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); + .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), equalTo(2)); + .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), equalTo(2)); logger.info("--> create repository"); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") @@ -146,23 +138,26 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> clean the test persistent setting"); client.admin().cluster().prepareUpdateSettings().setPersistentSettings( Settings.settingsBuilder() - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 1) - .put(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1))) + 
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1) + .put(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1))) .execute().actionGet(); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis())); + .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis())); stopNode(secondNode); assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("1").get().isTimedOut(), equalTo(false)); logger.info("--> restore snapshot"); - client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); - assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); - + try { + client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); + fail("can't restore minimum master nodes"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [discovery.zen.minimum_master_nodes] from [1] to [2]", ex.getMessage()); + assertEquals("cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [1]", ex.getCause().getMessage()); + } logger.info("--> ensure that zen discovery minimum master nodes wasn't restored"); 
assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), not(equalTo(2))); + .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), not(equalTo(2))); } public void testRestoreCustomMetadata() throws Exception { @@ -554,7 +549,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception { logger.info("--> start 2 nodes"); Settings nodeSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) .build(); internalCluster().startNode(nodeSettings); @@ -899,78 +894,6 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest )); } - public static abstract class TestCustomMetaData extends AbstractDiffable<Custom> implements MetaData.Custom { - private final String data; - - protected TestCustomMetaData(String data) { - this.data = data; - } - - public String getData() { - return data; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - TestCustomMetaData that = (TestCustomMetaData) o; - - if (!data.equals(that.data)) return false; - - return true; - } - - @Override - public int hashCode() { - return data.hashCode(); - } - - protected abstract TestCustomMetaData newTestCustomMetaData(String data); - - @Override - public Custom readFrom(StreamInput in) throws IOException { - return newTestCustomMetaData(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - 
out.writeString(getData()); - } - - @Override - public Custom fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); - } - data = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [{}]", currentFieldName); - } - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); - } - } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return newTestCustomMetaData(data); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", getData()); - return builder; - } - } - static { MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java b/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java new file mode 100644 index 0000000000..de275eaffc --- /dev/null +++ b/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.hamcrest; + + +public class DoubleMatcher { + + /** + * Better floating point comparisons courtesy of https://github.com/brazzy/floating-point-gui.de + * + * Snippet adapted to use doubles instead of floats + */ + public static boolean nearlyEqual(double a, double b, double epsilon) { + final double absA = Math.abs(a); + final double absB = Math.abs(b); + final double diff = Math.abs(a - b); + + if (a == b) { // shortcut, handles infinities + return true; + } else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) { + // a or b is zero or both are extremely close to it + // relative error is less meaningful here + return diff < (epsilon * Double.MIN_NORMAL); + } else { // use relative error + return diff / Math.min((absA + absB), Double.MAX_VALUE) < epsilon; + } + } +} diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java deleted file mode 100644 index 3dfca5cb28..0000000000 --- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed wit[√h - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.threadpool; - -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.settings.Validator; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.*; - -import static org.junit.Assert.*; - -public class ThreadPoolTypeSettingsValidatorTests extends ESTestCase { - private Validator validator; - - @Before - public void setUp() throws Exception { - super.setUp(); - validator = ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR; - } - - public void testValidThreadPoolTypeSettings() { - for (Map.Entry<String, ThreadPool.ThreadPoolType> entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) { - assertNull(validateSetting(validator, entry.getKey(), entry.getValue().getType())); - } - } - - public void testInvalidThreadPoolTypeSettings() { - for (Map.Entry<String, ThreadPool.ThreadPoolType> entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) { - Set<ThreadPool.ThreadPoolType> set = new HashSet<>(); - set.addAll(Arrays.asList(ThreadPool.ThreadPoolType.values())); - set.remove(entry.getValue()); - ThreadPool.ThreadPoolType invalidThreadPoolType = randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()])); - String expectedMessage = String.format( - Locale.ROOT, - "thread pool type for [%s] can only be updated to [%s] but was [%s]", - entry.getKey(), - entry.getValue().getType(), - invalidThreadPoolType.getType()); - String message = validateSetting(validator, entry.getKey(), invalidThreadPoolType.getType()); - assertNotNull(message); - assertEquals(expectedMessage, 
message); - } - } - - public void testNonThreadPoolTypeSetting() { - String setting = ThreadPool.THREADPOOL_GROUP + randomAsciiOfLength(10) + "foo"; - String value = randomAsciiOfLength(10); - assertNull(validator.validate(setting, value, ClusterState.PROTO)); - } - - private String validateSetting(Validator validator, String threadPoolName, String value) { - return validator.validate(ThreadPool.THREADPOOL_GROUP + threadPoolName + ".type", value, ClusterState.PROTO); - } -} diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 95ceea1e49..56b2a03bad 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.threadpool; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.test.ESTestCase; @@ -90,17 +91,19 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { threadPool = new ThreadPool(settingsBuilder().put("name", "testUpdateSettingsCanNotChangeThreadPoolType").build()); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); - - threadPool.updateSettings( + clusterSettings.applySettings( settingsBuilder() .put("threadpool." + threadPoolName + ".type", invalidThreadPoolType.getType()) .build() ); fail("expected IllegalArgumentException"); } catch (IllegalArgumentException e) { + assertEquals("illegal value can't update [threadpool.] 
from [{}] to [{" + threadPoolName + ".type=" + invalidThreadPoolType.getType() + "}]", e.getMessage()); assertThat( - e.getMessage(), + e.getCause().getMessage(), is("setting threadpool." + threadPoolName + ".type to " + invalidThreadPoolType.getType() + " is not permitted; must be " + validThreadPoolType.getType())); } finally { terminateThreadPoolIfNeeded(threadPool); @@ -111,14 +114,16 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.CACHED); ThreadPool threadPool = null; try { - threadPool = new ThreadPool( - Settings.settingsBuilder() - .put("name", "testCachedExecutorType").build()); + Settings nodeSettings = Settings.settingsBuilder() + .put("name", "testCachedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - threadPool.updateSettings(settingsBuilder() + Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); @@ -134,7 +139,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change keep alive Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." 
+ threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value changed assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -143,7 +148,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Set the same keep alive - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value didn't change assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -160,11 +165,13 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { - threadPool = new ThreadPool(settingsBuilder() - .put("name", "testCachedExecutorType").build()); + Settings nodeSettings = Settings.settingsBuilder() + .put("name", "testFixedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - - threadPool.updateSettings(settingsBuilder() + Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." 
+ threadPoolName + ".size", "15") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); @@ -177,7 +184,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); // Put old type back - threadPool.updateSettings(Settings.EMPTY); + settings = clusterSettings.applySettings(Settings.EMPTY); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); // Make sure keep alive value is not used assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue()); @@ -190,7 +197,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change size Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".size", "10").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); // Make sure size values changed assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(10)); @@ -201,8 +208,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Change queue capacity - threadPool.updateSettings(settingsBuilder() - .put("threadpool." + threadPoolName + ".queue", "500") + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." 
+ threadPoolName + ".queue", "500") .build()); } finally { terminateThreadPoolIfNeeded(threadPool); @@ -213,9 +219,12 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(settingsBuilder() + Settings nodeSettings = settingsBuilder() .put("threadpool." + threadPoolName + ".size", 10) - .put("name", "testCachedExecutorType").build()); + .put("name", "testScalingExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1)); assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L)); @@ -224,7 +233,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change settings that doesn't require pool replacement Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder() + clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .put("threadpool." + threadPoolName + ".min", "2") .put("threadpool." + threadPoolName + ".size", "15") @@ -248,9 +257,12 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(Settings.settingsBuilder() + Settings nodeSettings = Settings.settingsBuilder() .put("threadpool." 
+ threadPoolName + ".queue_size", 1000) - .put("name", "testCachedExecutorType").build()); + .put("name", "testCachedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L); final CountDownLatch latch = new CountDownLatch(1); @@ -264,7 +276,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { } } ); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); + clusterSettings.applySettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor))); assertThat(oldExecutor.isShutdown(), equalTo(true)); assertThat(oldExecutor.isTerminating(), equalTo(true)); @@ -279,12 +291,15 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { public void testCustomThreadPool() throws Exception { ThreadPool threadPool = null; try { - threadPool = new ThreadPool(Settings.settingsBuilder() + Settings nodeSettings = Settings.settingsBuilder() .put("threadpool.my_pool1.type", "scaling") .put("threadpool.my_pool2.type", "fixed") .put("threadpool.my_pool2.size", "1") .put("threadpool.my_pool2.queue_size", "1") - .put("name", "testCustomThreadPool").build()); + .put("name", "testCustomThreadPool").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); ThreadPoolInfo groups = threadPool.info(); boolean foundPool1 = false; boolean foundPool2 = false; @@ -316,7 +331,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { Settings settings = Settings.builder() 
.put("threadpool.my_pool2.size", "10") .build(); - threadPool.updateSettings(settings); + clusterSettings.applySettings(settings); groups = threadPool.info(); foundPool1 = false; diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index becb61666d..6599412834 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,12 +70,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { super.setUp(); threadPool = new ThreadPool(getClass().getName()); serviceA = build( - Settings.builder().put("name", "TS_A", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + Settings.builder().put("name", "TS_A", TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING").build(), version0, new NamedWriteableRegistry() ); nodeA = new DiscoveryNode("TS_A", "TS_A", serviceA.boundAddress().publishAddress(), emptyMap(), version0); serviceB = build( - Settings.builder().put("name", "TS_B", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + Settings.builder().put("name", "TS_B", TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING").build(), version1, new NamedWriteableRegistry() ); nodeB = 
new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), emptyMap(), version1); @@ -650,9 +651,10 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { includeSettings = "test"; excludeSettings = "DOESN'T_MATCH"; } - - serviceA.applySettings(Settings.builder() - .put(TransportService.SETTING_TRACE_LOG_INCLUDE, includeSettings, TransportService.SETTING_TRACE_LOG_EXCLUDE, excludeSettings) + ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + serviceA.setDynamicSettings(service); + service.applySettings(Settings.builder() + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), includeSettings, TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), excludeSettings) .build()); tracer.reset(4); diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index 3f140b388f..7a3fd88f93 100644 --- a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.cache.recycler.MockPageCacheRecycler; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -28,7 +29,6 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.netty.NettyTransport; @@ -64,7 +64,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { threadPool = new ThreadPool(settings); - threadPool.setNodeSettingsService(new NodeSettingsService(settings)); + threadPool.setClusterSettings(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); NetworkService networkService = new NetworkService(settings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry()); diff --git a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java new file mode 100644 index 0000000000..d587ab05e4 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.threadpool.ThreadPool; + +/** Unit tests for module registering custom transport and transport service */ +public class TransportModuleTests extends ModuleTestCase { + + + + static class FakeTransport extends AssertingLocalTransport { + @Inject + public FakeTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) { + super(settings, threadPool, version, namedWriteableRegistry); + } + } + + static class FakeTransportService extends TransportService { + @Inject + public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index c84a9eb9a5..78caef498d 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -21,13 +21,14 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -40,7 +41,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; @@ -66,7 +66,7 @@ public class NettyTransportIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) .put("node.mode", "network") - .put(TransportModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); } @Override @@ -99,8 +99,8 @@ public class NettyTransportIT extends ESIntegTestCase { public String description() { return "an exception throwing transport for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransport("exception-throwing", ExceptionThrowingNettyTransport.class); + public void onModule(NetworkModule module) { + module.registerTransport("exception-throwing", ExceptionThrowingNettyTransport.class); } } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java index 59ef26c42a..ee49012291 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java +++ 
b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java @@ -19,11 +19,12 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -31,7 +32,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.junit.annotations.Network; -import org.elasticsearch.transport.TransportModule; import java.net.InetAddress; import java.util.Locale; @@ -60,7 +60,7 @@ public class NettyTransportMultiPortIntegrationIT extends ESIntegTestCase { Settings.Builder builder = settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put("network.host", "127.0.0.1") - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("node.mode", "network") .put("transport.profiles.client1.port", randomPortRange) .put("transport.profiles.client1.publish_host", "127.0.0.7") @@ -72,7 +72,7 @@ public class NettyTransportMultiPortIntegrationIT extends ESIntegTestCase { public void testThatTransportClientCanConnect() throws Exception { Settings settings = settingsBuilder() .put("cluster.name", internalCluster().getClusterName()) - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + 
.put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("path.home", createTempDir().toString()) .build(); try (TransportClient transportClient = TransportClient.builder().settings(settings).build()) { diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java index 3437701f6c..ea67ce3271 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java @@ -21,22 +21,19 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.transport.TransportModule; import java.net.Inet4Address; -import java.net.Inet6Address; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; /** * Checks that Elasticsearch produces a sane publish_address when it binds to @@ -48,7 +45,7 @@ public class NettyTransportPublishAddressIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("node.mode", 
"network").build(); } diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 28a3dea118..1350dcbb8e 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; @@ -47,6 +48,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -80,13 +82,18 @@ public class TribeIT extends ESIntegTestCase { } @Override + public Collection<Class<? 
extends Plugin>> nodePlugins() { + return Collections.emptyList(); + } + + @Override public Settings transportClientSettings() { return null; } }; cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, - Strings.randomBase64UUID(getRandom()), nodeConfigurationSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, true); + Strings.randomBase64UUID(getRandom()), nodeConfigurationSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, Collections.emptyList()); cluster2.beforeTest(getRandom(), 0.1); cluster2.ensureAtLeastNumDataNodes(2); diff --git a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java index a789bb4877..09887d8354 100644 --- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -120,7 +120,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return new Object(); // unused } @@ -218,7 +218,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return script; } @@ -309,7 +309,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return new Object(); // unused } @@ -400,7 +400,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, String> params) { return new Object(); // unused } diff --git a/core/src/test/resources/indices/bwc/index-2.0.2.zip b/core/src/test/resources/indices/bwc/index-2.0.2.zip Binary files differnew file mode 100644 index 0000000000..2f77405a83 --- /dev/null +++ 
b/core/src/test/resources/indices/bwc/index-2.0.2.zip diff --git a/core/src/test/resources/indices/bwc/index-2.1.1.zip b/core/src/test/resources/indices/bwc/index-2.1.1.zip Binary files differnew file mode 100644 index 0000000000..74c967d2c6 --- /dev/null +++ b/core/src/test/resources/indices/bwc/index-2.1.1.zip diff --git a/core/src/test/resources/indices/bwc/repo-2.0.2.zip b/core/src/test/resources/indices/bwc/repo-2.0.2.zip Binary files differnew file mode 100644 index 0000000000..696ffd939d --- /dev/null +++ b/core/src/test/resources/indices/bwc/repo-2.0.2.zip diff --git a/core/src/test/resources/indices/bwc/repo-2.1.1.zip b/core/src/test/resources/indices/bwc/repo-2.1.1.zip Binary files differnew file mode 100644 index 0000000000..3253da62c3 --- /dev/null +++ b/core/src/test/resources/indices/bwc/repo-2.1.1.zip |