author Ryan Ernst <ryan@iernst.net> 2015-12-17 16:57:39 -0800
committer Ryan Ernst <ryan@iernst.net> 2015-12-17 21:25:06 -0800
commit 6a99796b02383862825687a4fb57080ddd92f616 (patch)
tree 3db36dd9cce2719ecdde9754f4f057a38b8b9345 /test/framework/src/main
parent e128298c5da69ce8ccc1cd82d2b94cabd52df094 (diff)
Build: Move test framework under a "test" top level dir
This allows adding more test projects, e.g. integ test fixtures that will be coming soon.
Diffstat (limited to 'test/framework/src/main')
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java | 242
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java | 146
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java | 136
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java | 79
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java | 171
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java | 53
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java | 45
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java | 552
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java | 63
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java | 64
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/node/MockNode.java | 55
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java | 41
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java | 41
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java | 44
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java | 120
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java | 91
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java | 80
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java | 52
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java | 55
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java | 55
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java | 100
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java | 287
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java | 288
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java | 100
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java | 37
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java | 243
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java | 271
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java | 2129
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java | 263
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java | 640
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java | 58
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java | 245
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java | 176
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java | 51
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java | 59
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java | 1873
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java | 164
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java | 78
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java | 61
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/TestCluster.java | 232
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java | 671
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java | 112
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java | 126
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java | 69
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java | 170
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java | 282
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java | 167
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java | 107
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java | 129
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java | 127
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java | 94
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java | 57
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java | 204
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java | 45
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java | 56
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java | 71
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java | 44
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java | 91
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java | 176
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java | 91
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java | 52
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java | 216
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java | 83
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java | 48
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java | 190
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java | 53
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java | 33
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java | 59
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java | 831
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java | 145
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java | 62
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java | 34
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java | 41
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java | 118
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java | 172
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java | 80
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java | 68
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java | 374
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java | 104
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java | 67
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java | 157
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java | 128
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java | 304
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java | 41
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java | 97
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java | 115
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java | 40
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java | 40
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java | 246
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java | 108
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java | 126
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java | 105
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java | 40
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java | 39
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java | 34
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java | 34
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java | 48
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java | 40
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java | 39
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java | 36
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java | 33
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java | 33
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java | 51
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java | 165
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java | 100
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java | 57
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java | 54
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java | 83
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java | 83
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java | 83
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java | 145
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java | 34
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java | 58
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java | 58
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java | 61
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java | 55
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java | 63
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java | 59
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java | 58
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java | 77
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java | 75
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java | 52
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java | 59
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java | 114
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java | 78
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java | 171
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java | 139
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java | 73
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java | 57
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java | 167
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java | 220
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java | 104
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java | 91
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java | 183
-rw-r--r-- test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java | 618
-rw-r--r-- test/framework/src/main/resources/log4j.properties | 9
136 files changed, 20696 insertions, 0 deletions
diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
new file mode 100644
index 0000000000..6878408379
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.bootstrap;
+
+import com.carrotsearch.randomizedtesting.RandomizedRunner;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestSecurityManager;
+import org.elasticsearch.SecureSM;
+import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.bootstrap.ESPolicy;
+import org.elasticsearch.bootstrap.Security;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.plugins.PluginInfo;
+import org.junit.Assert;
+
+import java.io.FilePermission;
+import java.io.InputStream;
+import java.net.SocketPermission;
+import java.net.URL;
+import java.nio.file.Path;
+import java.security.Permission;
+import java.security.Permissions;
+import java.security.Policy;
+import java.security.ProtectionDomain;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Set;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.systemPropertyAsBoolean;
+
+/**
+ * Initializes natives and installs test security manager
+ * (init'd early by base classes to ensure it happens regardless of which
+ * test case happens to be first, test ordering, etc).
+ * <p>
+ * The idea is to mimic as much as possible what happens with ES in production
+ * mode (e.g. assign permissions and install security manager the same way)
+ */
+public class BootstrapForTesting {
+
+ // TODO: can we share more code with the non-test side here
+ // without making things complex?
+
+ static {
+ // make sure java.io.tmpdir always exists (in case code uses it in a static initializer)
+ Path javaTmpDir = PathUtils.get(Objects.requireNonNull(System.getProperty("java.io.tmpdir"),
+ "please set ${java.io.tmpdir} in pom.xml"));
+ try {
+ Security.ensureDirectoryExists(javaTmpDir);
+ } catch (Exception e) {
+ throw new RuntimeException("unable to create test temp directory", e);
+ }
+
+ // just like bootstrap, initialize natives, then SM
+ Bootstrap.initializeNatives(javaTmpDir, true, true, true);
+
+ // initialize probes
+ Bootstrap.initializeProbes();
+
+ // initialize sysprops
+ BootstrapInfo.getSystemProperties();
+
+ // check for jar hell
+ try {
+ JarHell.checkJarHell();
+ } catch (Exception e) {
+ throw new RuntimeException("found jar hell in test classpath", e);
+ }
+
+ // install security manager if requested
+ if (systemPropertyAsBoolean("tests.security.manager", true)) {
+ try {
+ // initialize paths the same exact way as bootstrap
+ Permissions perms = new Permissions();
+ Security.addClasspathPermissions(perms);
+ // grant extra permissions to any jython jars found on the classpath
+ for (URL url : JarHell.parseClassPath()) {
+ Path path = PathUtils.get(url.toURI());
+
+ // crazy jython...
+ String filename = path.getFileName().toString();
+ if (filename.contains("jython") && filename.endsWith(".jar")) {
+ // just enough so it won't fail when it does not exist
+ perms.add(new FilePermission(path.getParent().toString(), "read,readlink"));
+ perms.add(new FilePermission(path.getParent().resolve("Lib").toString(), "read,readlink"));
+ }
+ }
+ // java.io.tmpdir
+ Security.addPath(perms, "java.io.tmpdir", javaTmpDir, "read,readlink,write,delete");
+ // custom test config file
+ if (Strings.hasLength(System.getProperty("tests.config"))) {
+ perms.add(new FilePermission(System.getProperty("tests.config"), "read,readlink"));
+ }
+ // jacoco coverage output file
+ if (Boolean.getBoolean("tests.coverage")) {
+ Path coverageDir = PathUtils.get(System.getProperty("tests.coverage.dir"));
+ perms.add(new FilePermission(coverageDir.resolve("jacoco.exec").toString(), "read,write"));
+ // in case we get fancy and use the -integration goals later:
+ perms.add(new FilePermission(coverageDir.resolve("jacoco-it.exec").toString(), "read,write"));
+ }
+ // intellij hack: intellij test runner wants setIO and will
+ // screw up all test logging without it!
+ if (System.getProperty("tests.maven") == null) {
+ perms.add(new RuntimePermission("setIO"));
+ }
+
+ // add bind permissions for testing
+ // ephemeral ports (note, on java 7 before update 51, this is a different permission)
+ // this should really be the only one allowed for tests, otherwise they have race conditions
+ perms.add(new SocketPermission("localhost:0", "listen,resolve"));
+ // ... but tests are messy. like file permissions, just let them live in a fantasy for now.
+ // TODO: cut over all tests to bind to ephemeral ports
+ perms.add(new SocketPermission("localhost:1024-", "listen,resolve"));
+
+ // read test-framework permissions
+ final Policy testFramework = Security.readPolicy(Bootstrap.class.getResource("test-framework.policy"), JarHell.parseClassPath());
+ final Policy esPolicy = new ESPolicy(perms, getPluginPermissions(), true);
+ Policy.setPolicy(new Policy() {
+ @Override
+ public boolean implies(ProtectionDomain domain, Permission permission) {
+ // implements union
+ return esPolicy.implies(domain, permission) || testFramework.implies(domain, permission);
+ }
+ });
+ System.setSecurityManager(new SecureSM(true));
+ Security.selfTest();
+
+ // guarantee plugin classes are initialized first, in case they have one-time hacks.
+ // this just makes unit testing more realistic
+ for (URL url : Collections.list(BootstrapForTesting.class.getClassLoader().getResources(PluginInfo.ES_PLUGIN_PROPERTIES))) {
+ Properties properties = new Properties();
+ try (InputStream stream = url.openStream()) {
+ properties.load(stream);
+ }
+ if (Boolean.parseBoolean(properties.getProperty("jvm"))) {
+ String clazz = properties.getProperty("classname");
+ if (clazz != null) {
+ Class.forName(clazz);
+ }
+ }
+ }
+ } catch (Exception e) {
+ throw new RuntimeException("unable to install test security manager", e);
+ }
+ }
+ }
+
+ /**
+ * we don't know which code sources belong to which plugin, so just remove the permission from key codebases
+ * like core, test-framework, etc. this way tests fail if AccessController blocks are missing.
+ */
+ @SuppressForbidden(reason = "accesses fully qualified URLs to configure security")
+ static Map<String,Policy> getPluginPermissions() throws Exception {
+ List<URL> pluginPolicies = Collections.list(BootstrapForTesting.class.getClassLoader().getResources(PluginInfo.ES_PLUGIN_POLICY));
+ if (pluginPolicies.isEmpty()) {
+ return Collections.emptyMap();
+ }
+
+ // compute classpath minus obvious places, all other jars will get the permission.
+ Set<URL> codebases = new HashSet<>(Arrays.asList(parseClassPathWithSymlinks()));
+ Set<URL> excluded = new HashSet<>(Arrays.asList(
+ // es core
+ Bootstrap.class.getProtectionDomain().getCodeSource().getLocation(),
+ // es test framework
+ BootstrapForTesting.class.getProtectionDomain().getCodeSource().getLocation(),
+ // lucene test framework
+ LuceneTestCase.class.getProtectionDomain().getCodeSource().getLocation(),
+ // randomized runner
+ RandomizedRunner.class.getProtectionDomain().getCodeSource().getLocation(),
+ // junit library
+ Assert.class.getProtectionDomain().getCodeSource().getLocation()
+ ));
+ codebases.removeAll(excluded);
+
+ // parse each policy file, with codebase substitution from the classpath
+ final List<Policy> policies = new ArrayList<>();
+ for (URL policyFile : pluginPolicies) {
+ policies.add(Security.readPolicy(policyFile, codebases.toArray(new URL[codebases.size()])));
+ }
+
+ // consult each policy file for those codebases
+ Map<String,Policy> map = new HashMap<>();
+ for (URL url : codebases) {
+ map.put(url.getFile(), new Policy() {
+ @Override
+ public boolean implies(ProtectionDomain domain, Permission permission) {
+ // implements union
+ for (Policy p : policies) {
+ if (p.implies(domain, permission)) {
+ return true;
+ }
+ }
+ return false;
+ }
+ });
+ }
+ return Collections.unmodifiableMap(map);
+ }
+
+ /**
+ * return the parsed classpath, but with symlinks resolved to their destination files for matching.
+ * this mirrors the toRealPath() used in the production code, where we have a proper plugin structure
+ */
+ @SuppressForbidden(reason = "does evil stuff with paths and urls because devs and jenkins do evil stuff with paths and urls")
+ static URL[] parseClassPathWithSymlinks() throws Exception {
+ URL[] raw = JarHell.parseClassPath();
+ for (int i = 0; i < raw.length; i++) {
+ raw[i] = PathUtils.get(raw[i].toURI()).toRealPath().toUri().toURL();
+ }
+ return raw;
+ }
+
+ // does nothing, just an easy way to make sure the class is loaded.
+ public static void ensureInitialized() {}
+}
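Usage sketch (not part of this commit; the base-class name is hypothetical): test base classes trigger this bootstrap from a static initializer via ensureInitialized(), so it runs before any test code regardless of test ordering.

    import org.elasticsearch.bootstrap.BootstrapForTesting;

    public abstract class MyTestBase {
        static {
            // initializes natives and installs the test security manager exactly once
            BootstrapForTesting.ensureInitialized();
        }
    }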
diff --git a/test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java b/test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
new file mode 100644
index 0000000000..99cd417133
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cache.recycler;
+
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.recycler.Recycler.V;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.lang.reflect.Array;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+public class MockPageCacheRecycler extends PageCacheRecycler {
+
+ private static final ConcurrentMap<Object, Throwable> ACQUIRED_PAGES = new ConcurrentHashMap<>();
+
+ public static void ensureAllPagesAreReleased() throws Exception {
+ final Map<Object, Throwable> masterCopy = new HashMap<>(ACQUIRED_PAGES);
+ if (!masterCopy.isEmpty()) {
+ // not empty, we might be executing on a shared cluster that keeps on obtaining
+ // and releasing pages; let's make sure that after a reasonable timeout, all pages
+ // in our master copy (snapshot) have been released
+ boolean success =
+ ESTestCase.awaitBusy(() -> Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_PAGES.keySet()));
+ if (!success) {
+ masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet());
+ ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove the leaked pages from the global map so they are reported only once
+ if (!masterCopy.isEmpty()) {
+ final Throwable t = masterCopy.entrySet().iterator().next().getValue();
+ throw new RuntimeException(masterCopy.size() + " pages have not been released", t);
+ }
+ }
+ }
+ }
+
+ private final Random random;
+
+ @Inject
+ public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
+ super(settings, threadPool);
+ final long seed = settings.getAsLong(InternalTestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
+ random = new Random(seed);
+ }
+
+ private <T> V<T> wrap(final V<T> v) {
+ ACQUIRED_PAGES.put(v, new Throwable());
+ return new V<T>() {
+
+ @Override
+ public void close() {
+ final Throwable t = ACQUIRED_PAGES.remove(v);
+ if (t == null) {
+ throw new IllegalStateException("Releasing a page that has not been acquired");
+ }
+ final T ref = v();
+ if (ref instanceof Object[]) {
+ Arrays.fill((Object[])ref, 0, Array.getLength(ref), null);
+ } else if (ref instanceof byte[]) {
+ Arrays.fill((byte[])ref, 0, Array.getLength(ref), (byte) random.nextInt(256));
+ } else if (ref instanceof long[]) {
+ Arrays.fill((long[])ref, 0, Array.getLength(ref), random.nextLong());
+ } else if (ref instanceof int[]) {
+ Arrays.fill((int[])ref, 0, Array.getLength(ref), random.nextInt());
+ } else if (ref instanceof double[]) {
+ Arrays.fill((double[])ref, 0, Array.getLength(ref), random.nextDouble() - 0.5);
+ } else if (ref instanceof float[]) {
+ Arrays.fill((float[])ref, 0, Array.getLength(ref), random.nextFloat() - 0.5f);
+ } else {
+ for (int i = 0; i < Array.getLength(ref); ++i) {
+ Array.set(ref, i, (byte) random.nextInt(256));
+ }
+ }
+ v.close();
+ }
+
+ @Override
+ public T v() {
+ return v.v();
+ }
+
+ @Override
+ public boolean isRecycled() {
+ return v.isRecycled();
+ }
+
+ };
+ }
+
+ @Override
+ public V<byte[]> bytePage(boolean clear) {
+ final V<byte[]> page = super.bytePage(clear);
+ if (!clear) {
+ Arrays.fill(page.v(), 0, page.v().length, (byte)random.nextInt(1<<8));
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<int[]> intPage(boolean clear) {
+ final V<int[]> page = super.intPage(clear);
+ if (!clear) {
+ Arrays.fill(page.v(), 0, page.v().length, random.nextInt());
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<long[]> longPage(boolean clear) {
+ final V<long[]> page = super.longPage(clear);
+ if (!clear) {
+ Arrays.fill(page.v(), 0, page.v().length, random.nextLong());
+ }
+ return wrap(page);
+ }
+
+ @Override
+ public V<Object[]> objectPage() {
+ return wrap(super.objectPage());
+ }
+
+}
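Usage sketch (not part of this commit; the JUnit hook and class name are hypothetical): a suite can call ensureAllPagesAreReleased() after its tests to fail fast on leaked pages, with the Throwable recorded at acquisition time attached as the cause.

    import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
    import org.junit.AfterClass;

    public class PageLeakCheckIT {
        @AfterClass
        public static void ensureNoPageLeaks() throws Exception {
            // throws if any page acquired during the suite was never closed
            MockPageCacheRecycler.ensureAllPagesAreReleased();
        }
    }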
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
new file mode 100644
index 0000000000..6ac2101fe5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.monitor.fs.FsInfo;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.concurrent.CountDownLatch;
+
+/**
+ * Fake ClusterInfoService that allows updating the nodes' disk usage
+ * stats with fake values
+ */
+public class MockInternalClusterInfoService extends InternalClusterInfoService {
+
+ public static class TestPlugin extends Plugin {
+ @Override
+ public String name() {
+ return "mock-cluster-info-service";
+ }
+ @Override
+ public String description() {
+ return "a mock cluster info service for testing";
+ }
+ public void onModule(ClusterModule module) {
+ module.clusterInfoServiceImpl = MockInternalClusterInfoService.class;
+ }
+ }
+
+ private final ClusterName clusterName;
+ private volatile NodeStats[] stats = new NodeStats[3];
+
+ /** Create a fake NodeStats for the given node and usage */
+ public static NodeStats makeStats(String nodeName, DiskUsage usage) {
+ FsInfo.Path[] paths = new FsInfo.Path[1];
+ FsInfo.Path path = new FsInfo.Path("/dev/null", null,
+ usage.getTotalBytes(), usage.getFreeBytes(), usage.getFreeBytes());
+ paths[0] = path;
+ FsInfo fsInfo = new FsInfo(System.currentTimeMillis(), paths);
+ return new NodeStats(new DiscoveryNode(nodeName, DummyTransportAddress.INSTANCE, Version.CURRENT),
+ System.currentTimeMillis(),
+ null, null, null, null, null,
+ fsInfo,
+ null, null, null,
+ null, null);
+ }
+
+ @Inject
+ public MockInternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
+ TransportNodesStatsAction transportNodesStatsAction,
+ TransportIndicesStatsAction transportIndicesStatsAction,
+ ClusterService clusterService, ThreadPool threadPool) {
+ super(settings, nodeSettingsService, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool);
+ this.clusterName = ClusterName.clusterNameFromSettings(settings);
+ stats[0] = makeStats("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 100));
+ stats[1] = makeStats("node_t2", new DiskUsage("node_t2", "n2", "/dev/null", 100, 100));
+ stats[2] = makeStats("node_t3", new DiskUsage("node_t3", "n3", "/dev/null", 100, 100));
+ }
+
+ public void setN1Usage(String nodeName, DiskUsage newUsage) {
+ stats[0] = makeStats(nodeName, newUsage);
+ }
+
+ public void setN2Usage(String nodeName, DiskUsage newUsage) {
+ stats[1] = makeStats(nodeName, newUsage);
+ }
+
+ public void setN3Usage(String nodeName, DiskUsage newUsage) {
+ stats[2] = makeStats(nodeName, newUsage);
+ }
+
+ @Override
+ public CountDownLatch updateNodeStats(final ActionListener<NodesStatsResponse> listener) {
+ NodesStatsResponse response = new NodesStatsResponse(clusterName, stats);
+ listener.onResponse(response);
+ return new CountDownLatch(0);
+ }
+
+ @Override
+ public CountDownLatch updateIndicesStats(final ActionListener<IndicesStatsResponse> listener) {
+ // Not used, so noop
+ return new CountDownLatch(0);
+ }
+
+ @Override
+ public ClusterInfo getClusterInfo() {
+ ClusterInfo clusterInfo = super.getClusterInfo();
+ return new DevNullClusterInfo(clusterInfo.getNodeLeastAvailableDiskUsages(), clusterInfo.getNodeMostAvailableDiskUsages(), clusterInfo.shardSizes);
+ }
+
+ /**
+ * ClusterInfo that always points to DevNull.
+ */
+ public static class DevNullClusterInfo extends ClusterInfo {
+ public DevNullClusterInfo(ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsage,
+ ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsage, ImmutableOpenMap<String, Long> shardSizes) {
+ super(leastAvailableSpaceUsage, mostAvailableSpaceUsage, shardSizes, null);
+ }
+
+ @Override
+ public String getDataPath(ShardRouting shardRouting) {
+ return "/dev/null";
+ }
+ }
+}
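Usage sketch (not part of this commit; the surrounding test body is hypothetical): once TestPlugin is registered, a test can fetch the service instance and feed it fake disk usage to drive disk-threshold allocation decisions.

    // inside a test whose node plugins include MockInternalClusterInfoService.TestPlugin
    MockInternalClusterInfoService cis = (MockInternalClusterInfoService)
            internalCluster().getInstance(ClusterInfoService.class);
    // pretend node_t1 is nearly full: 100 bytes total, 2 bytes free
    cis.setN1Usage("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 2));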
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
new file mode 100644
index 0000000000..df9e1f8af2
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+import org.elasticsearch.test.ESTestCase;
+
+/**
+ * A helper that allows creating shard routing instances within tests, without requiring
+ * ShardRouting itself to expose different simplified constructors.
+ */
+public class TestShardRouting {
+
+ public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, boolean primary, ShardRoutingState state, long version) {
+ return new ShardRouting(index, shardId, currentNodeId, null, null, primary, state, version, buildUnassignedInfo(state), buildAllocationId(state), true, -1);
+ }
+
+ public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, long version) {
+ return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version, buildUnassignedInfo(state), buildAllocationId(state), true, -1);
+ }
+
+ public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, boolean primary, ShardRoutingState state, AllocationId allocationId, long version) {
+ return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, null, primary, state, version, buildUnassignedInfo(state), allocationId, true, -1);
+ }
+
+ public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version) {
+ return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, buildUnassignedInfo(state), buildAllocationId(state), true, -1);
+ }
+
+ public static ShardRouting newShardRouting(String index, int shardId, String currentNodeId,
+ String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
+ UnassignedInfo unassignedInfo) {
+ return new ShardRouting(index, shardId, currentNodeId, relocatingNodeId, restoreSource, primary, state, version, unassignedInfo, buildAllocationId(state), true, -1);
+ }
+
+ private static AllocationId buildAllocationId(ShardRoutingState state) {
+ switch (state) {
+ case UNASSIGNED:
+ return null;
+ case INITIALIZING:
+ case STARTED:
+ return AllocationId.newInitializing();
+ case RELOCATING:
+ AllocationId allocationId = AllocationId.newInitializing();
+ return AllocationId.newRelocation(allocationId);
+ default:
+ throw new IllegalStateException("illegal state");
+ }
+ }
+
+ private static UnassignedInfo buildUnassignedInfo(ShardRoutingState state) {
+ switch (state) {
+ case UNASSIGNED:
+ case INITIALIZING:
+ return new UnassignedInfo(ESTestCase.randomFrom(UnassignedInfo.Reason.values()), "auto generated for test");
+ case STARTED:
+ case RELOCATING:
+ return null;
+ default:
+ throw new IllegalStateException("illegal state");
+ }
+ }
+}
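Usage sketch (not part of this commit): building a started primary without touching ShardRouting's real constructors; the helper fills in a plausible AllocationId and UnassignedInfo for the requested state.

    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.ShardRoutingState;
    import org.elasticsearch.cluster.routing.TestShardRouting;

    // a started primary for shard 0 of index "test", routing version 1
    ShardRouting primary = TestShardRouting.newShardRouting("test", 0, "node1", true, ShardRoutingState.STARTED, 1);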
diff --git a/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
new file mode 100644
index 0000000000..ab304c28c5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.cli;
+
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.StreamsUtils;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.Writer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
+
+public abstract class CliToolTestCase extends ESTestCase {
+
+ @Before
+ @SuppressForbidden(reason = "sets es.default.path.home during tests")
+ public void setPathHome() {
+ System.setProperty("es.default.path.home", createTempDir().toString());
+ }
+
+ @After
+ @SuppressForbidden(reason = "clears es.default.path.home during tests")
+ public void clearPathHome() {
+ System.clearProperty("es.default.path.home");
+ }
+
+ public static String[] args(String command) {
+ if (!Strings.hasLength(command)) {
+ return Strings.EMPTY_ARRAY;
+ }
+ return command.split("\\s+");
+ }
+
+ /**
+ * A terminal implementation that discards everything
+ */
+ public static class MockTerminal extends Terminal {
+
+ private static final PrintWriter DEV_NULL = new PrintWriter(new DevNullWriter());
+
+ public MockTerminal() {
+ super(Verbosity.NORMAL);
+ }
+
+ public MockTerminal(Verbosity verbosity) {
+ super(verbosity);
+ }
+
+ @Override
+ protected void doPrint(String msg, Object... args) {
+ }
+
+ @Override
+ public String readText(String text, Object... args) {
+ return null;
+ }
+
+ @Override
+ public char[] readSecret(String text, Object... args) {
+ return new char[0];
+ }
+
+ @Override
+ public void print(String msg, Object... args) {
+ }
+
+ @Override
+ public void printStackTrace(Throwable t) {
+ }
+
+ @Override
+ public PrintWriter writer() {
+ return DEV_NULL;
+ }
+
+ private static class DevNullWriter extends Writer {
+
+ @Override
+ public void write(char[] cbuf, int off, int len) throws IOException {
+ }
+
+ @Override
+ public void flush() throws IOException {
+ }
+
+ @Override
+ public void close() throws IOException {
+ }
+ }
+ }
+
+ /**
+ * A terminal implementation that captures everything written to it
+ */
+ public static class CaptureOutputTerminal extends MockTerminal {
+
+ List<String> terminalOutput = new ArrayList<>();
+
+ public CaptureOutputTerminal() {
+ super(Verbosity.NORMAL);
+ }
+
+ public CaptureOutputTerminal(Verbosity verbosity) {
+ super(verbosity);
+ }
+
+ @Override
+ protected void doPrint(String msg, Object... args) {
+ terminalOutput.add(String.format(Locale.ROOT, msg, args));
+ }
+
+ @Override
+ public void print(String msg, Object... args) {
+ doPrint(msg, args);
+ }
+
+ @Override
+ public void printStackTrace(Throwable t) {
+ terminalOutput.add(ExceptionsHelper.stackTrace(t));
+ }
+
+ public List<String> getTerminalOutput() {
+ return terminalOutput;
+ }
+ }
+
+ public static void assertTerminalOutputContainsHelpFile(CliToolTestCase.CaptureOutputTerminal terminal, String classPath) throws IOException {
+ List<String> nonEmptyLines = new ArrayList<>();
+ for (String line : terminal.getTerminalOutput()) {
+ String originalPrintedLine = line.replaceAll(System.lineSeparator(), "");
+ if (!Strings.isNullOrEmpty(originalPrintedLine)) {
+ nonEmptyLines.add(originalPrintedLine);
+ }
+ }
+ assertThat(nonEmptyLines, hasSize(greaterThan(0)));
+
+ String expectedDocs = StreamsUtils.copyToStringFromClasspath(classPath);
+ for (String nonEmptyLine : nonEmptyLines) {
+ assertThat(expectedDocs, containsString(nonEmptyLine.replaceAll(System.lineSeparator(), "")));
+ }
+ }
+}
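Usage sketch (not part of this commit; the assertions assume the Hamcrest matchers imported above): CaptureOutputTerminal records every formatted line, so a test can run a CLI tool against it and assert on the output afterwards.

    CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal();
    terminal.print("unknown command [%s]", "foo");
    assertThat(terminal.getTerminalOutput(), hasSize(1));
    assertThat(terminal.getTerminalOutput().get(0), containsString("unknown command [foo]"));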
diff --git a/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java b/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
new file mode 100644
index 0000000000..50d677c600
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+
+/** test helper methods for working with files */
+public class FileTestUtils {
+
+ /**
+ * Check that a file contains a given String
+ * @param dir root dir for file
+ * @param filename relative path from root dir to file
+ * @param expected expected content (if null, we don't expect any file)
+ */
+ public static void assertFileContent(Path dir, String filename, String expected) throws IOException {
+ Assert.assertThat(Files.exists(dir), is(true));
+ Path file = dir.resolve(filename);
+ if (expected == null) {
+ Assert.assertThat("file [" + file + "] should not exist.", Files.exists(file), is(false));
+ } else {
+ assertFileExists(file);
+ String fileContent = new String(Files.readAllBytes(file), java.nio.charset.StandardCharsets.UTF_8);
+ // trim the string content to prevent different handling on windows vs. unix and CR chars...
+ Assert.assertThat(fileContent.trim(), equalTo(expected.trim()));
+ }
+ }
+}
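Usage sketch (not part of this commit; createTempDir() comes from ESTestCase): write a file, assert on its trimmed content, and assert absence by passing null.

    Path dir = createTempDir();
    Files.write(dir.resolve("hello.txt"), "hello\n".getBytes(StandardCharsets.UTF_8));
    FileTestUtils.assertFileContent(dir, "hello.txt", "hello");   // content is trimmed before comparison
    FileTestUtils.assertFileContent(dir, "missing.txt", null);    // null means the file must not exist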
diff --git a/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java b/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
new file mode 100644
index 0000000000..fee053eed4
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.nio.file.FileSystem;
+
+/**
+ * Exposes some package-private stuff in PathUtils for framework purposes only!
+ */
+public class PathUtilsForTesting {
+
+ /** Sets a new default filesystem for testing */
+ public static void setup() {
+ installMock(LuceneTestCase.getBaseTempDirForTestClass().getFileSystem());
+ }
+
+ /** Installs a mock filesystem for testing */
+ public static void installMock(FileSystem mock) {
+ PathUtils.DEFAULT = mock;
+ }
+
+ /** Resets filesystem back to the real system default */
+ public static void teardown() {
+ PathUtils.DEFAULT = PathUtils.ACTUAL_DEFAULT;
+ }
+}
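Usage sketch (not part of this commit): the framework swaps in Lucene's per-test-class filesystem around a test and restores the real default afterwards.

    PathUtilsForTesting.setup();          // install the mock test filesystem
    try {
        // ... code under test resolves paths through PathUtils ...
    } finally {
        PathUtilsForTesting.teardown();   // restore the real system default
    }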
diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java
new file mode 100644
index 0000000000..50f1886431
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java
@@ -0,0 +1,552 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.util;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import org.apache.lucene.util.Accountable;
+import org.apache.lucene.util.Accountables;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.WeakHashMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class MockBigArrays extends BigArrays {
+
+ /**
+ * Tracking allocations is useful when debugging a leak but shouldn't be enabled by default as this would also be very costly
+ * since it creates a new Exception every time a new array is created.
+ */
+ private static final boolean TRACK_ALLOCATIONS = false;
+
+ private static final Set<BigArrays> INSTANCES = Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<BigArrays, Boolean>()));
+ private static final ConcurrentMap<Object, Object> ACQUIRED_ARRAYS = new ConcurrentHashMap<>();
+
+ public static void ensureAllArraysAreReleased() throws Exception {
+ final Map<Object, Object> masterCopy = new HashMap<>(ACQUIRED_ARRAYS);
+ if (!masterCopy.isEmpty()) {
+ // not empty, we might be executing on a shared cluster that keeps on obtaining
+ // and releasing arrays; let's make sure that after a reasonable timeout, all arrays
+ // in our master copy (snapshot) have been released
+ boolean success = ESTestCase.awaitBusy(() -> Sets.haveEmptyIntersection(masterCopy.keySet(), ACQUIRED_ARRAYS.keySet()));
+ if (!success) {
+ masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet());
+ ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // remove the leaked arrays from the global map so they are reported only once
+ if (!masterCopy.isEmpty()) {
+ final Object cause = masterCopy.entrySet().iterator().next().getValue();
+ throw new RuntimeException(masterCopy.size() + " arrays have not been released", cause instanceof Throwable ? (Throwable) cause : null);
+ }
+ }
+ }
+ }
+
+ private final Random random;
+ private final PageCacheRecycler recycler;
+ private final CircuitBreakerService breakerService;
+
+ @Inject
+ public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService) {
+ this(recycler, breakerService, false);
+ }
+
+ public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) {
+ super(recycler, breakerService, checkBreaker);
+ this.recycler = recycler;
+ this.breakerService = breakerService;
+ long seed;
+ try {
+ seed = SeedUtils.parseSeed(RandomizedContext.current().getRunnerSeedAsString());
+ } catch (IllegalStateException e) { // rest tests don't run randomized and have no context
+ seed = 0;
+ }
+ random = new Random(seed);
+ INSTANCES.add(this);
+ }
+
+
+ @Override
+ public BigArrays withCircuitBreaking() {
+ return new MockBigArrays(this.recycler, this.breakerService, true);
+ }
+
+ @Override
+ public ByteArray newByteArray(long size, boolean clearOnResize) {
+ final ByteArrayWrapper array = new ByteArrayWrapper(super.newByteArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public ByteArray resize(ByteArray array, long size) {
+ ByteArrayWrapper arr = (ByteArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof ByteArrayWrapper) {
+ arr = (ByteArrayWrapper) array;
+ } else {
+ arr = new ByteArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public IntArray newIntArray(long size, boolean clearOnResize) {
+ final IntArrayWrapper array = new IntArrayWrapper(super.newIntArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public IntArray resize(IntArray array, long size) {
+ IntArrayWrapper arr = (IntArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof IntArrayWrapper) {
+ arr = (IntArrayWrapper) array;
+ } else {
+ arr = new IntArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public LongArray newLongArray(long size, boolean clearOnResize) {
+ final LongArrayWrapper array = new LongArrayWrapper(super.newLongArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public LongArray resize(LongArray array, long size) {
+ LongArrayWrapper arr = (LongArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof LongArrayWrapper) {
+ arr = (LongArrayWrapper) array;
+ } else {
+ arr = new LongArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public FloatArray newFloatArray(long size, boolean clearOnResize) {
+ final FloatArrayWrapper array = new FloatArrayWrapper(super.newFloatArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public FloatArray resize(FloatArray array, long size) {
+ FloatArrayWrapper arr = (FloatArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof FloatArrayWrapper) {
+ arr = (FloatArrayWrapper) array;
+ } else {
+ arr = new FloatArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public DoubleArray newDoubleArray(long size, boolean clearOnResize) {
+ final DoubleArrayWrapper array = new DoubleArrayWrapper(super.newDoubleArray(size, clearOnResize), clearOnResize);
+ if (!clearOnResize) {
+ array.randomizeContent(0, size);
+ }
+ return array;
+ }
+
+ @Override
+ public DoubleArray resize(DoubleArray array, long size) {
+ DoubleArrayWrapper arr = (DoubleArrayWrapper) array;
+ final long originalSize = arr.size();
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof DoubleArrayWrapper) {
+ arr = (DoubleArrayWrapper) array;
+ } else {
+ arr = new DoubleArrayWrapper(array, arr.clearOnResize);
+ }
+ if (!arr.clearOnResize) {
+ arr.randomizeContent(originalSize, size);
+ }
+ return arr;
+ }
+
+ @Override
+ public <T> ObjectArray<T> newObjectArray(long size) {
+ return new ObjectArrayWrapper<>(super.<T>newObjectArray(size));
+ }
+
+ @Override
+ public <T> ObjectArray<T> resize(ObjectArray<T> array, long size) {
+ ObjectArrayWrapper<T> arr = (ObjectArrayWrapper<T>) array;
+ array = super.resize(arr.in, size);
+ ACQUIRED_ARRAYS.remove(arr);
+ if (array instanceof ObjectArrayWrapper) {
+ arr = (ObjectArrayWrapper<T>) array;
+ } else {
+ arr = new ObjectArrayWrapper<>(array);
+ }
+ return arr;
+ }
+
+ private static abstract class AbstractArrayWrapper {
+
+ final BigArray in;
+ boolean clearOnResize;
+ AtomicBoolean released;
+
+ AbstractArrayWrapper(BigArray in, boolean clearOnResize) {
+ ACQUIRED_ARRAYS.put(this, TRACK_ALLOCATIONS ? new RuntimeException() : Boolean.TRUE);
+ this.in = in;
+ this.clearOnResize = clearOnResize;
+ released = new AtomicBoolean(false);
+ }
+
+ protected abstract BigArray getDelegate();
+
+ protected abstract void randomizeContent(long from, long to);
+
+ public long size() {
+ return getDelegate().size();
+ }
+
+ public long ramBytesUsed() {
+ return in.ramBytesUsed();
+ }
+
+ public void close() {
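+ // fail loudly on double release, then scramble the content so reads after close surface as test failures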
+ if (!released.compareAndSet(false, true)) {
+ throw new IllegalStateException("Double release");
+ }
+ ACQUIRED_ARRAYS.remove(this);
+ randomizeContent(0, size());
+ getDelegate().close();
+ }
+
+ }
+
+ private class ByteArrayWrapper extends AbstractArrayWrapper implements ByteArray {
+
+ private final ByteArray in;
+
+ ByteArrayWrapper(ByteArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, (byte) random.nextInt(1 << 8));
+ }
+
+ @Override
+ public byte get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public byte set(long index, byte value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public boolean get(long index, int len, BytesRef ref) {
+ return in.get(index, len, ref);
+ }
+
+ @Override
+ public void set(long index, byte[] buf, int offset, int len) {
+ in.set(index, buf, offset, len);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, byte value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class IntArrayWrapper extends AbstractArrayWrapper implements IntArray {
+
+ private final IntArray in;
+
+ IntArrayWrapper(IntArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, random.nextInt());
+ }
+
+ @Override
+ public int get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public int set(long index, int value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public int increment(long index, int inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, int value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class LongArrayWrapper extends AbstractArrayWrapper implements LongArray {
+
+ private final LongArray in;
+
+ LongArrayWrapper(LongArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, random.nextLong());
+ }
+
+ @Override
+ public long get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public long set(long index, long value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public long increment(long index, long inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, long value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+
+ }
+
+ private class FloatArrayWrapper extends AbstractArrayWrapper implements FloatArray {
+
+ private final FloatArray in;
+
+ FloatArrayWrapper(FloatArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, (random.nextFloat() - 0.5f) * 1000);
+ }
+
+ @Override
+ public float get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public float set(long index, float value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public float increment(long index, float inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, float value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class DoubleArrayWrapper extends AbstractArrayWrapper implements DoubleArray {
+
+ private final DoubleArray in;
+
+ DoubleArrayWrapper(DoubleArray in, boolean clearOnResize) {
+ super(in, clearOnResize);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ fill(from, to, (random.nextDouble() - 0.5) * 1000);
+ }
+
+ @Override
+ public double get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public double set(long index, double value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ public double increment(long index, double inc) {
+ return in.increment(index, inc);
+ }
+
+ @Override
+ public void fill(long fromIndex, long toIndex, double value) {
+ in.fill(fromIndex, toIndex, value);
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+ private class ObjectArrayWrapper<T> extends AbstractArrayWrapper implements ObjectArray<T> {
+
+ private final ObjectArray<T> in;
+
+ ObjectArrayWrapper(ObjectArray<T> in) {
+ super(in, false);
+ this.in = in;
+ }
+
+ @Override
+ protected BigArray getDelegate() {
+ return in;
+ }
+
+ @Override
+ public T get(long index) {
+ return in.get(index);
+ }
+
+ @Override
+ public T set(long index, T value) {
+ return in.set(index, value);
+ }
+
+ @Override
+ protected void randomizeContent(long from, long to) {
+ // will be cleared anyway
+ }
+
+ @Override
+ public Collection<Accountable> getChildResources() {
+ return Collections.singleton(Accountables.namedAccountable("delegate", in));
+ }
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
new file mode 100644
index 0000000000..8b529f9fc8
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.indices.IndicesModule;
+import org.elasticsearch.indices.mapper.MapperRegistry;
+import org.elasticsearch.test.IndexSettingsModule;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.Collections;
+
+
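+/** Helpers for building a standalone {@link MapperService} in tests, backed by a temporary directory. */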
+public class MapperTestUtils {
+
+ public static MapperService newMapperService(Path tempDir, Settings indexSettings) throws IOException {
+ IndicesModule indicesModule = new IndicesModule();
+ return newMapperService(tempDir, indexSettings, indicesModule);
+ }
+
+ public static MapperService newMapperService(Path tempDir, Settings settings, IndicesModule indicesModule) throws IOException {
+ Settings.Builder settingsBuilder = Settings.builder()
+ .put("path.home", tempDir)
+ .put(settings);
+ if (settings.get(IndexMetaData.SETTING_VERSION_CREATED) == null) {
+ settingsBuilder.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ }
+ Settings finalSettings = settingsBuilder.build();
+ MapperRegistry mapperRegistry = indicesModule.getMapperRegistry();
+ IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(new Index("test"), finalSettings);
+ AnalysisService analysisService = new AnalysisRegistry(null, new Environment(finalSettings)).build(indexSettings);
+ SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
+ return new MapperService(indexSettings,
+ analysisService,
+ similarityService,
+ mapperRegistry);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java
new file mode 100644
index 0000000000..db63d137f3
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index;
+
+import org.apache.lucene.index.AssertingDirectoryReader;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.engine.MockEngineFactory;
+
+import java.util.Collection;
+import java.util.Collections;
+
+// this must exist in the same package as IndexModule to allow access to setting the impl
+public class MockEngineFactoryPlugin extends Plugin {
+ @Override
+ public String name() {
+ return "mock-engine-factory";
+ }
+ @Override
+ public String description() {
+ return "a mock engine factory for testing";
+ }
+
+ private Class<? extends FilterDirectoryReader> readerWrapper = AssertingDirectoryReader.class;
+
+ @Override
+ public void onIndexModule(IndexModule module) {
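+ // install the mock engine factory so index readers are wrapped (by default in Lucene's AssertingDirectoryReader)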
+ module.engineFactory.set(new MockEngineFactory(readerWrapper));
+ }
+
+ @Override
+ public Collection<Module> nodeModules() {
+ return Collections.singleton(new MockEngineReaderModule());
+ }
+
+ public class MockEngineReaderModule extends AbstractModule {
+
+ public void setReaderClass(Class<? extends FilterDirectoryReader> readerWrapper) {
+ MockEngineFactoryPlugin.this.readerWrapper = readerWrapper;
+ }
+
+ @Override
+ protected void configure() {
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
new file mode 100644
index 0000000000..57dcc08f4f
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.node;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.Collection;
+
+/**
+ * A node for testing which allows:
+ * <ul>
+ * <li>Overriding Version.CURRENT</li>
+ * <li>Adding test plugins that exist on the classpath</li>
+ * </ul>
+ */
+public class MockNode extends Node {
+
+ // these are kept here so a copy of this MockNode can be created, since Node does not store them
+ private Version version;
+ private Collection<Class<? extends Plugin>> plugins;
+
+ public MockNode(Settings settings, Version version, Collection<Class<? extends Plugin>> classpathPlugins) {
+ super(InternalSettingsPreparer.prepareEnvironment(settings, null), version, classpathPlugins);
+ this.version = version;
+ this.plugins = classpathPlugins;
+ }
+
+ public Collection<Class<? extends Plugin>> getPlugins() {
+ return plugins;
+ }
+
+ public Version getVersion() {
+ return version;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java b/test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java
new file mode 100644
index 0000000000..f958b2b752
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.node;
+
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.plugins.Plugin;
+
+public class NodeMocksPlugin extends Plugin {
+
+ @Override
+ public String name() {
+ return "node-mocks";
+ }
+
+ @Override
+ public String description() {
+ return "a plugin to setup mocks for node level classes";
+ }
+
+ public void onModule(NodeModule module) {
+ module.pageCacheRecyclerImpl = MockPageCacheRecycler.class;
+ module.bigArraysImpl = MockBigArrays.class;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java b/test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java
new file mode 100644
index 0000000000..671d7b8fd3
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.percolator;
+
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.common.Strings;
+import org.junit.Assert;
+
+/** Static method pulled out of PercolatorIT, used by other tests */
+public class PercolatorTestUtil extends Assert {
+
+ public static String[] convertFromTextArray(PercolateResponse.Match[] matches, String index) {
+ if (matches.length == 0) {
+ return Strings.EMPTY_ARRAY;
+ }
+ String[] strings = new String[matches.length];
+ for (int i = 0; i < matches.length; i++) {
+ assertEquals(index, matches[i].getIndex().string());
+ strings[i] = matches[i].getId().string();
+ }
+ return strings;
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
new file mode 100644
index 0000000000..10f4de2482
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugins;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Properties;
+
+/** Utility methods for testing plugins */
+public class PluginTestUtil {
+
+ /** convenience method to write a plugin properties file */
+ public static void writeProperties(Path pluginDir, String... stringProps) throws IOException {
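+ // stringProps is a flat list of alternating key/value pairs, hence the even-length assertion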
+ assert stringProps.length % 2 == 0;
+ Files.createDirectories(pluginDir);
+ Path propertiesFile = pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES);
+ Properties properties = new Properties();
+ for (int i = 0; i < stringProps.length; i += 2) {
+ properties.put(stringProps[i], stringProps[i + 1]);
+ }
+ try (OutputStream out = Files.newOutputStream(propertiesFile)) {
+ properties.store(out, "");
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
new file mode 100644
index 0000000000..aec90aa50d
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.script;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A dummy script engine used for testing. "Compiling" is a no-op: running the script simply returns its source.
+ */
+public class MockScriptEngine implements ScriptEngineService {
+ public static final String NAME = "mockscript";
+
+ public static class TestPlugin extends Plugin {
+
+ public TestPlugin() {
+ }
+
+ @Override
+ public String name() {
+ return NAME;
+ }
+
+ @Override
+ public String description() {
+ return "Mock script engine for integration tests";
+ }
+
+ public void onModule(ScriptModule module) {
+ module.addScriptEngine(MockScriptEngine.class);
+ }
+
+ }
+
+ @Override
+ public String[] types() {
+ return new String[]{ NAME };
+ }
+
+ @Override
+ public String[] extensions() {
+ return types();
+ }
+
+ @Override
+ public boolean sandboxed() {
+ return true;
+ }
+
+ @Override
+ public Object compile(String script, Map<String, String> params) {
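+ // "compilation" is the identity function: the source string is the compiled form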
+ return script;
+ }
+
+ @Override
+ public ExecutableScript executable(CompiledScript compiledScript, @Nullable Map<String, Object> vars) {
+ return new AbstractExecutableScript() {
+ @Override
+ public Object run() {
+ return new BytesArray((String)compiledScript.compiled());
+ }
+ };
+ }
+
+ @Override
+ public SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars) {
+ return new SearchScript() {
+ @Override
+ public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
+ AbstractSearchScript leafSearchScript = new AbstractSearchScript() {
+
+ @Override
+ public Object run() {
+ return compiledScript.compiled();
+ }
+
+ };
+ leafSearchScript.setLookup(lookup.getLeafSearchLookup(context));
+ return leafSearchScript;
+ }
+
+ @Override
+ public boolean needsScores() {
+ return false;
+ }
+ };
+ }
+
+ @Override
+ public void scriptRemoved(@Nullable CompiledScript script) {
+ }
+
+ @Override
+ public void close() throws IOException {
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
new file mode 100644
index 0000000000..9a7a3efa3d
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search;
+
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.IndicesWarmer;
+import org.elasticsearch.indices.cache.request.IndicesRequestCache;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.dfs.DfsPhase;
+import org.elasticsearch.search.fetch.FetchPhase;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.query.QueryPhase;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class MockSearchService extends SearchService {
+
+ public static class TestPlugin extends Plugin {
+ @Override
+ public String name() {
+ return "mock-search-service";
+ }
+ @Override
+ public String description() {
+ return "a mock search service for testing";
+ }
+ public void onModule(SearchModule module) {
+ module.searchServiceImpl = MockSearchService.class;
+ }
+ }
+
+ private static final Map<SearchContext, Throwable> ACTIVE_SEARCH_CONTEXTS = new ConcurrentHashMap<>();
+
+ /** Throw an {@link AssertionError} if there are still in-flight contexts. */
+ public static void assertNoInFlightContext() {
+ final Map<SearchContext, Throwable> copy = new HashMap<>(ACTIVE_SEARCH_CONTEXTS);
+ if (copy.isEmpty() == false) {
+ throw new AssertionError("There are still " + copy.size() + " in-flight contexts", copy.values().iterator().next());
+ }
+ }
+
+ @Inject
+ public MockSearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer,
+ ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
+ DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) {
+ super(settings, nodeSettingsService, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase,
+ queryPhase, fetchPhase, indicesQueryCache);
+ }
+
+ @Override
+ protected void putContext(SearchContext context) {
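+ // remember where each context was opened so a leak report can show the offending stack trace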
+ super.putContext(context);
+ ACTIVE_SEARCH_CONTEXTS.put(context, new RuntimeException());
+ }
+
+ @Override
+ protected SearchContext removeContext(long id) {
+ final SearchContext removed = super.removeContext(id);
+ if (removed != null) {
+ ACTIVE_SEARCH_CONTEXTS.remove(removed);
+ }
+ return removed;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java
new file mode 100644
index 0000000000..4d5ff10f9c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket;
+
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
+import org.elasticsearch.search.aggregations.bucket.terms.Terms;
+import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+
+public abstract class AbstractTermsTestCase extends ESIntegTestCase {
+
+ public String randomExecutionHint() {
+ return randomBoolean() ? null : randomFrom(ExecutionMode.values()).toString();
+ }
+
+ private static long sumOfDocCounts(Terms terms) {
+ long sumOfDocCounts = terms.getSumOfOtherDocCounts();
+ for (Terms.Bucket b : terms.getBuckets()) {
+ sumOfDocCounts += b.getDocCount();
+ }
+ return sumOfDocCounts;
+ }
+
+ public void testOtherDocCount(String... fieldNames) {
+ for (String fieldName : fieldNames) {
+ SearchResponse allTerms = client().prepareSearch("idx")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(fieldName)
+ .size(0)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .get();
+ assertSearchResponse(allTerms);
+
+ Terms terms = allTerms.getAggregations().get("terms");
+ assertEquals(0, terms.getSumOfOtherDocCounts()); // size is 0
+ final long sumOfDocCounts = sumOfDocCounts(terms);
+ final int totalNumTerms = terms.getBuckets().size();
+
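+ // regardless of the requested size/shardSize, buckets plus "other" doc counts must add up to the same total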
+ for (int size = 1; size < totalNumTerms + 2; size += randomIntBetween(1, 5)) {
+ for (int shardSize = size; shardSize <= totalNumTerms + 2; shardSize += randomIntBetween(1, 5)) {
+ SearchResponse resp = client().prepareSearch("idx")
+ .addAggregation(terms("terms")
+ .executionHint(randomExecutionHint())
+ .field(fieldName)
+ .size(size)
+ .shardSize(shardSize)
+ .collectMode(randomFrom(SubAggCollectionMode.values())))
+ .get();
+ assertSearchResponse(resp);
+ terms = resp.getAggregations().get("terms");
+ assertEquals(Math.min(size, totalNumTerms), terms.getBuckets().size());
+ assertEquals(sumOfDocCounts, sumOfDocCounts(terms));
+ }
+ }
+ }
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
new file mode 100644
index 0000000000..313734cc9c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeSignificanceScoreScriptNoParams extends TestScript {
+
+ public static final String NATIVE_SIGNIFICANCE_SCORE_SCRIPT_NO_PARAMS = "native_significance_score_script_no_params";
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeSignificanceScoreScriptNoParams();
+ }
+
+ @Override
+ public boolean needsScores() {
+ return false;
+ }
+ }
+
+ private NativeSignificanceScoreScriptNoParams() {
+ }
+
+ @Override
+ public Object run() {
+ return _subset_freq.longValue() + _subset_size.longValue() + _superset_freq.longValue() + _superset_size.longValue();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
new file mode 100644
index 0000000000..6b248fe56f
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.script;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.NativeScriptFactory;
+
+import java.util.Map;
+
+public class NativeSignificanceScoreScriptWithParams extends TestScript {
+
+ public static final String NATIVE_SIGNIFICANCE_SCORE_SCRIPT_WITH_PARAMS = "native_significance_score_script_with_params";
+ double factor = 0.0;
+
+ public static class Factory implements NativeScriptFactory {
+
+ @Override
+ public ExecutableScript newScript(@Nullable Map<String, Object> params) {
+ return new NativeSignificanceScoreScriptWithParams(params);
+ }
+
+ @Override
+ public boolean needsScores() {
+ return false;
+ }
+ }
+
+ private NativeSignificanceScoreScriptWithParams(Map<String, Object> params) {
+ factor = ((Number) params.get("param")).doubleValue();
+ }
+
+ @Override
+ public Object run() {
+ return factor * (_subset_freq.longValue() + _subset_size.longValue() + _superset_freq.longValue() + _superset_size.longValue()) / factor;
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
new file mode 100644
index 0000000000..3060d7af81
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.bucket.script;
+
+import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic;
+
+public abstract class TestScript implements ExecutableScript {
+
+ ScriptHeuristic.LongAccessor _subset_freq;
+ ScriptHeuristic.LongAccessor _subset_size;
+ ScriptHeuristic.LongAccessor _superset_freq;
+ ScriptHeuristic.LongAccessor _superset_size;
+
+ protected TestScript() {
+ }
+
+ @Override
+ public void setNextVar(String name, Object value) {
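+ // capture the significance-heuristic accessors by name; run() implementations read them directly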
+ if (name.equals("_subset_freq")) {
+ _subset_freq = (ScriptHeuristic.LongAccessor)value;
+ }
+ if (name.equals("_subset_size")) {
+ _subset_size = (ScriptHeuristic.LongAccessor)value;
+ }
+ if (name.equals("_superset_freq")) {
+ _superset_freq = (ScriptHeuristic.LongAccessor)value;
+ }
+ if (name.equals("_superset_size")) {
+ _superset_size = (ScriptHeuristic.LongAccessor)value;
+ }
+ }
+
+ @Override
+ public Double unwrap(Object value) {
+ return ((Number) value).doubleValue();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java
new file mode 100644
index 0000000000..ece26be823
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.metrics;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+@ESIntegTestCase.SuiteScopeTestCase
+public abstract class AbstractNumericTestCase extends ESIntegTestCase {
+ protected static long minValue, maxValue, minValues, maxValues;
+
+ @Override
+ public void setupSuiteScopeCluster() throws Exception {
+ createIndex("idx");
+ createIndex("idx_unmapped");
+
+ List<IndexRequestBuilder> builders = new ArrayList<>();
+
+ final int numDocs = 10;
+ for (int i = 0; i < numDocs; i++) { // TODO randomize the size and the params in here?
+ builders.add(client().prepareIndex("idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i+1)
+ .startArray("values").value(i+2).value(i+3).endArray()
+ .endObject()));
+ }
+ minValue = 1;
+ minValues = 2;
+ maxValue = numDocs;
+ maxValues = numDocs + 2;
+ indexRandom(true, builders);
+
+ // Create an index to test the empty-buckets functionality. It works by indexing two docs,
+ // {value: 0} and {value: 2}, then building a histogram agg with interval 1 and empty buckets
+ // computed. The empty bucket is the one associated with key "1"; each test then has to check
+ // that this bucket exists with the appropriate sub aggregations.
+ prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet();
+ builders = new ArrayList<>();
+ for (int i = 0; i < 2; i++) {
+ builders.add(client().prepareIndex("empty_bucket_idx", "type", ""+i).setSource(jsonBuilder()
+ .startObject()
+ .field("value", i*2)
+ .endObject()));
+ }
+ indexRandom(true, builders);
+ ensureSearchable();
+ }
+
+ public abstract void testEmptyAggregation() throws Exception;
+
+ public abstract void testUnmapped() throws Exception;
+
+ public abstract void testSingleValuedField() throws Exception;
+
+ public abstract void testSingleValuedFieldGetProperty() throws Exception;
+
+ public abstract void testSingleValuedFieldPartiallyUnmapped() throws Exception;
+
+ public abstract void testSingleValuedFieldWithValueScript() throws Exception;
+
+ public abstract void testSingleValuedFieldWithValueScriptWithParams() throws Exception;
+
+ public abstract void testMultiValuedField() throws Exception;
+
+ public abstract void testMultiValuedFieldWithValueScript() throws Exception;
+
+ public abstract void testMultiValuedFieldWithValueScriptWithParams() throws Exception;
+
+ public abstract void testScriptSingleValued() throws Exception;
+
+ public abstract void testScriptSingleValuedWithParams() throws Exception;
+
+ public abstract void testScriptMultiValued() throws Exception;
+
+ public abstract void testScriptMultiValuedWithParams() throws Exception;
+}
\ No newline at end of file
diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
new file mode 100644
index 0000000000..933f26e6e8
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+
+public class BackgroundIndexer implements AutoCloseable {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ final Thread[] writers;
+ final CountDownLatch stopLatch;
+ final CopyOnWriteArrayList<Throwable> failures;
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final AtomicLong idGenerator = new AtomicLong();
+ final AtomicLong indexCounter = new AtomicLong();
+ final CountDownLatch startLatch = new CountDownLatch(1);
+ final AtomicBoolean hasBudget = new AtomicBoolean(false); // when set to true, writers will acquire writes from a semaphore
+ final Semaphore availableBudget = new Semaphore(0);
+
+ volatile int minFieldSize = 10;
+ volatile int maxFieldSize = 140;
+
+ /**
+ * Start indexing in the background using a random number of threads.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+ */
+ public BackgroundIndexer(String index, String type, Client client) {
+ this(index, type, client, -1);
+ }
+
+ /**
+ * Start indexing in the background using a random number of threads. Indexing will be paused after numOfDocs docs have
+ * been indexed.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+ * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ */
+ public BackgroundIndexer(String index, String type, Client client, int numOfDocs) {
+ this(index, type, client, numOfDocs, RandomizedTest.scaledRandomIntBetween(2, 5));
+ }
+
+ /**
+ * Start indexing in the background using a given number of threads. Indexing will be paused after numOfDocs docs have
+ * been indexed.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+ * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ * @param writerCount number of indexing threads to use
+ */
+ public BackgroundIndexer(String index, String type, Client client, int numOfDocs, final int writerCount) {
+ this(index, type, client, numOfDocs, writerCount, true, null);
+ }
+
+ /**
+ * Start indexing in the background using a given number of threads. Indexing will be paused after numOfDocs docs have
+ * been indexed.
+ *
+ * @param index index name to index into
+ * @param type document type
+ * @param client client to use
+ * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ * @param writerCount number of indexing threads to use
+ * @param autoStart set to true to start indexing as soon as all threads have been created.
+ * @param random random instance to use
+ */
+ public BackgroundIndexer(final String index, final String type, final Client client, final int numOfDocs, final int writerCount,
+ boolean autoStart, Random random) {
+
+ if (random == null) {
+ random = RandomizedTest.getRandom();
+ }
+ failures = new CopyOnWriteArrayList<>();
+ writers = new Thread[writerCount];
+ stopLatch = new CountDownLatch(writers.length);
+ logger.info("--> creating {} indexing threads (auto start: [{}], numOfDocs: [{}])", writerCount, autoStart, numOfDocs);
+ for (int i = 0; i < writers.length; i++) {
+ final int indexerId = i;
+ final boolean batch = random.nextBoolean();
+ final Random threadRandom = new Random(random.nextLong());
+ writers[i] = new Thread() {
+ @Override
+ public void run() {
+ long id = -1;
+ try {
+ startLatch.await();
+ logger.info("**** starting indexing thread {}", indexerId);
+ while (!stop.get()) {
+ if (batch) {
+ int batchSize = threadRandom.nextInt(20) + 1;
+ if (hasBudget.get()) {
+ batchSize = Math.max(Math.min(batchSize, availableBudget.availablePermits()), 1);// always try to get at least one
+ if (!availableBudget.tryAcquire(batchSize, 250, TimeUnit.MILLISECONDS)) {
+ // time out -> check if we have to stop.
+ continue;
+ }
+
+ }
+ BulkRequestBuilder bulkRequest = client.prepareBulk();
+ for (int i = 0; i < batchSize; i++) {
+ id = idGenerator.incrementAndGet();
+ bulkRequest.add(client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)));
+ }
+ BulkResponse bulkResponse = bulkRequest.get();
+ for (BulkItemResponse bulkItemResponse : bulkResponse) {
+ if (!bulkItemResponse.isFailed()) {
+ indexCounter.incrementAndGet();
+ } else {
+ throw new ElasticsearchException("bulk request failure, id: ["
+ + bulkItemResponse.getFailure().getId() + "] message: " + bulkItemResponse.getFailure().getMessage());
+ }
+ }
+
+ } else {
+
+ if (hasBudget.get() && !availableBudget.tryAcquire(250, TimeUnit.MILLISECONDS)) {
+ // time out -> check if we have to stop.
+ continue;
+ }
+ id = idGenerator.incrementAndGet();
+ client.prepareIndex(index, type, Long.toString(id)).setSource(generateSource(id, threadRandom)).get();
+ indexCounter.incrementAndGet();
+ }
+ }
+ logger.info("**** done indexing thread {} stop: {} numDocsIndexed: {}", indexerId, stop.get(), indexCounter.get());
+ } catch (Throwable e) {
+ failures.add(e);
+ logger.warn("**** failed indexing thread {} on doc id {}", e, indexerId, id);
+ } finally {
+ stopLatch.countDown();
+ }
+ }
+ };
+ writers[i].start();
+ }
+
+ if (autoStart) {
+ start(numOfDocs);
+ }
+ }
+
+ private XContentBuilder generateSource(long id, Random random) throws IOException {
+ int contentLength = RandomInts.randomIntBetween(random, minFieldSize, maxFieldSize);
+ StringBuilder text = new StringBuilder(contentLength);
+ while (text.length() < contentLength) {
+ int tokenLength = RandomInts.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10));
+ text.append(" ").append(RandomStrings.randomRealisticUnicodeOfCodepointLength(random, tokenLength));
+ }
+ XContentBuilder builder = XContentFactory.smileBuilder();
+ builder.startObject().field("test", "value" + id)
+ .field("text", text.toString())
+ .field("id", id)
+ .endObject();
+ return builder;
+
+ }
+
+ private void setBudget(int numOfDocs) {
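+ // a non-negative budget gates writers through the semaphore; a negative value lifts the limit entirely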
+ logger.debug("updating budget to [{}]", numOfDocs);
+ if (numOfDocs >= 0) {
+ hasBudget.set(true);
+ availableBudget.release(numOfDocs);
+ } else {
+ hasBudget.set(false);
+ }
+
+ }
+
+ /** Start indexing with no limit to the number of documents */
+ public void start() {
+ start(-1);
+ }
+
+ /**
+ * Start indexing
+ *
+ * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ */
+ public void start(int numOfDocs) {
+ assert !stop.get() : "background indexer cannot be started after it has stopped";
+ setBudget(numOfDocs);
+ startLatch.countDown();
+ }
+
+ /** Pauses indexing by setting the current document limit to 0. */
+ public void pauseIndexing() {
+ availableBudget.drainPermits();
+ setBudget(0);
+ }
+
+ /** Continues indexing after it has been paused. No new document limit will be set. */
+ public void continueIndexing() {
+ continueIndexing(-1);
+ }
+
+ /**
+ * Continues indexing after it has been paused.
+ *
+ * @param numOfDocs number of documents to index before pausing. Set to -1 to have no limit.
+ */
+ public void continueIndexing(int numOfDocs) {
+ setBudget(numOfDocs);
+ }
+
+ /** Stops all background threads. */
+ public void stop() throws InterruptedException {
+ if (stop.get()) {
+ return;
+ }
+ stop.set(true);
+ Assert.assertThat("timeout while waiting for indexing threads to stop", stopLatch.await(6, TimeUnit.MINUTES), equalTo(true));
+ assertNoFailures();
+ }
+
+ public long totalIndexedDocs() {
+ return indexCounter.get();
+ }
+
+ public Throwable[] getFailures() {
+ return failures.toArray(new Throwable[failures.size()]);
+ }
+
+ public void assertNoFailures() {
+ Assert.assertThat(failures, emptyIterable());
+ }
+
+ /** Sets the minimum size, in code points, of the payload field in indexed documents. */
+ public void setMinFieldSize(int fieldSize) {
+ minFieldSize = fieldSize;
+ }
+
+ /** Sets the maximum size, in code points, of the payload field in indexed documents. */
+ public void setMaxFieldSize(int fieldSize) {
+ maxFieldSize = fieldSize;
+ }
+
+ @Override
+ public void close() throws Exception {
+ stop();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java
new file mode 100644
index 0000000000..caa414e071
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java
@@ -0,0 +1,288 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.FilterClient;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.*;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+/**
+ * A test cluster implementation that holds a fixed set of external nodes as well as an InternalTestCluster,
+ * which is used to run mixed-version clusters in tests such as backwards-compatibility tests.
+ * Note: this is an experimental API
+ */
+public class CompositeTestCluster extends TestCluster {
+ private final InternalTestCluster cluster;
+ private final ExternalNode[] externalNodes;
+ private final ExternalClient client = new ExternalClient();
+ private static final String NODE_PREFIX = "external_";
+
+ public CompositeTestCluster(InternalTestCluster cluster, int numExternalNodes, ExternalNode externalNode) throws IOException {
+ super(cluster.seed());
+ this.cluster = cluster;
+ this.externalNodes = new ExternalNode[numExternalNodes];
+ for (int i = 0; i < externalNodes.length; i++) {
+ externalNodes[i] = externalNode;
+ }
+ }
+
+ @Override
+ public synchronized void afterTest() throws IOException {
+ cluster.afterTest();
+ }
+
+ @Override
+ public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException {
+ super.beforeTest(random, transportClientRatio);
+ cluster.beforeTest(random, transportClientRatio);
+ Settings defaultSettings = cluster.getDefaultSettings();
+ final Client client = cluster.size() > 0 ? cluster.client() : cluster.clientNodeClient();
+ for (int i = 0; i < externalNodes.length; i++) {
+ if (!externalNodes[i].running()) {
+ externalNodes[i] = externalNodes[i].start(client, defaultSettings, NODE_PREFIX + i, cluster.getClusterName(), i);
+ }
+ externalNodes[i].reset(random.nextLong());
+ }
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(">=" + Integer.toString(this.size())).get();
+ }
+ }
+
+ private Collection<ExternalNode> runningNodes() {
+ return Arrays
+ .stream(externalNodes)
+ .filter(input -> input.running())
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ /**
+ * Upgrades one running external node to a node from the version running the tests. Commonly this is used
+ * to move from a node with version N-1 to a node running version N. This works seamlessly since they
+ * share the same data directory. This method returns <tt>true</tt> iff a node was upgraded; if no
+ * external node is running it returns <tt>false</tt>.
+ */
+ public synchronized boolean upgradeOneNode() throws InterruptedException, IOException {
+ return upgradeOneNode(Settings.EMPTY);
+ }
+
+ /**
+ * Upgrades all external running nodes to a node from the version running the tests.
+ * All nodes are shut down before the first upgrade happens.
+ * @return <code>true</code> iff at least one node was upgraded.
+ */
+ public synchronized boolean upgradeAllNodes() throws InterruptedException, IOException {
+ return upgradeAllNodes(Settings.EMPTY);
+ }
+
+
+ /**
+ * Upgrades all external running nodes to a node from the version running the tests.
+ * All nodes are shut down before the first upgrade happens.
+ * @return <code>true</code> iff at least one node was upgraded.
+ * @param nodeSettings settings for the upgrade nodes
+ */
+ public synchronized boolean upgradeAllNodes(Settings nodeSettings) throws InterruptedException, IOException {
+ boolean upgradedOneNode = false;
+ while(upgradeOneNode(nodeSettings)) {
+ upgradedOneNode = true;
+ }
+ return upgradedOneNode;
+ }
+
+ /**
+ * Upgrades one running external node to a node from the version running the tests, applying the given
+ * settings to the new node. Commonly this is used to move from a node with version N-1 to a node running
+ * version N. This works seamlessly since they share the same data directory. This method returns
+ * <tt>true</tt> iff a node was upgraded; if no external node is running it returns <tt>false</tt>.
+ */
+ public synchronized boolean upgradeOneNode(Settings nodeSettings) throws InterruptedException, IOException {
+ Collection<ExternalNode> runningNodes = runningNodes();
+ if (!runningNodes.isEmpty()) {
+ final Client existingClient = cluster.client();
+ ExternalNode externalNode = RandomPicks.randomFrom(random, runningNodes);
+ externalNode.stop();
+ String s = cluster.startNode(nodeSettings);
+ ExternalNode.waitForNode(existingClient, s);
+ assertNoTimeout(existingClient.admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(size())).get());
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns a simple pattern that matches all "new" nodes in the cluster.
+ */
+ public String newNodePattern() {
+ return cluster.nodePrefix() + "*";
+ }
+
+ /**
+ * Returns a simple pattern that matches all "old" / "backwards" nodes in the cluster.
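+ * Tests typically pass this pattern to shard-location assertions, e.g.
+ * {@code assertAllShardsOnNodes("index", backwardsNodePattern())} in {@code ESBackcompatTestCase}.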
+ */
+ public String backwardsNodePattern() {
+ return NODE_PREFIX + "*";
+ }
+
+ /**
+ * Allows allocation of shards of the given indices on all nodes in the cluster.
+ */
+ public void allowOnAllNodes(String... index) {
+ Settings build = Settings.builder().put("index.routing.allocation.exclude._name", "").build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+
+ /**
+ * Allows allocation of shards of the given indices only on "new" nodes in the cluster.
+ * Note: if a shard is allocated on an "old" node and can't be allocated on a "new" node it will only be
+ * removed once it can be allocated on some other "new" node.
+ */
+ public void allowOnlyNewNodes(String... index) {
+ Settings build = Settings.builder().put("index.routing.allocation.exclude._name", backwardsNodePattern()).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+
+ /**
+ * Starts a current version data node
+ */
+ public void startNewNode() {
+ cluster.startNode();
+ }
+
+ @Override
+ public synchronized Client client() {
+ return client;
+ }
+
+ @Override
+ public synchronized int size() {
+ return runningNodes().size() + cluster.size();
+ }
+
+ @Override
+ public int numDataNodes() {
+ return runningNodes().size() + cluster.numDataNodes();
+ }
+
+ @Override
+ public int numDataAndMasterNodes() {
+ return runningNodes().size() + cluster.numDataAndMasterNodes();
+ }
+
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ return cluster.httpAddresses();
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ IOUtils.close(externalNodes);
+ } finally {
+ IOUtils.close(cluster);
+ }
+ }
+
+ @Override
+ public void ensureEstimatedStats() {
+ if (size() > 0) {
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Fielddata breaker not reset to 0 on node: " + stats.getNode(),
+ stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ }
+ // CompositeTestCluster does not check the request breaker,
+ // because checking it requires a network request, which in
+ // turn increments the breaker, making it non-0
+ }
+ }
+
+ @Override
+ public String getClusterName() {
+ return cluster.getClusterName();
+ }
+
+ @Override
+ public synchronized Iterator<Client> iterator() {
+ return Collections.singleton(client()).iterator();
+ }
+
+ /**
+ * Delegates to {@link org.elasticsearch.test.InternalTestCluster#fullRestart()}
+ */
+ public void fullRestartInternalCluster() throws Exception {
+ cluster.fullRestart();
+ }
+
+ /**
+ * Returns the number of current version data nodes in the cluster
+ */
+ public int numNewDataNodes() {
+ return cluster.numDataNodes();
+ }
+
+ /**
+ * Returns the number of former version data nodes in the cluster
+ */
+ public int numBackwardsDataNodes() {
+ return runningNodes().size();
+ }
+
+ public TransportAddress externalTransportAddress() {
+ return RandomPicks.randomFrom(random, externalNodes).getTransportAddress();
+ }
+
+ public InternalTestCluster internalCluster() {
+ return cluster;
+ }
+
+ private synchronized Client internalClient() {
+ Collection<ExternalNode> externalNodes = runningNodes();
+ return random.nextBoolean() && !externalNodes.isEmpty() ? RandomPicks.randomFrom(random, externalNodes).getClient() : cluster.client();
+ }
+
+ private final class ExternalClient extends FilterClient {
+
+ public ExternalClient() {
+ super(internalClient());
+ }
+
+ @Override
+ public void close() {
+ // never close this client
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
new file mode 100644
index 0000000000..bf9ccc957b
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.ChecksumIndexInput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.Random;
+
+import static org.apache.lucene.util.LuceneTestCase.assumeTrue;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+public final class CorruptionUtils {
+ private static final ESLogger logger = ESLoggerFactory.getLogger("test");
+ private CorruptionUtils() {}
+
+ /**
+ * Corrupts a random file at a random position
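+ * <p>
+ * A minimal sketch of typical use (the file listing is hypothetical):
+ * <pre>
+ * Random random = new Random();                     // tests would use the seeded test random instead
+ * Path[] shardFiles = ...;                          // e.g. the files of a shard's Lucene index directory
+ * CorruptionUtils.corruptFile(random, shardFiles);  // flips one byte in one randomly chosen file
+ * </pre>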
+ */
+ public static void corruptFile(Random random, Path... files) throws IOException {
+ assertTrue("files must be non-empty", files.length > 0);
+ final Path fileToCorrupt = RandomPicks.randomFrom(random, files);
+ assertTrue(fileToCorrupt + " is not a file", Files.isRegularFile(fileToCorrupt));
+ try (Directory dir = FSDirectory.open(fileToCorrupt.toAbsolutePath().getParent())) {
+ long checksumBeforeCorruption;
+ try (IndexInput input = dir.openInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
+ checksumBeforeCorruption = CodecUtil.retrieveChecksum(input);
+ }
+ try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
+ // read
+ raf.position(random.nextInt((int) Math.min(Integer.MAX_VALUE, raf.size())));
+ long filePointer = raf.position();
+ ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+ raf.read(bb);
+ bb.flip();
+
+ // corrupt
+ byte oldValue = bb.get(0);
+ byte newValue = (byte) (oldValue + 1);
+ bb.put(0, newValue);
+
+ // rewrite
+ raf.position(filePointer);
+ raf.write(bb);
+ logger.info("Corrupting file -- flipping at position {} from {} to {} file: {}", filePointer, Integer.toHexString(oldValue), Integer.toHexString(newValue), fileToCorrupt.getFileName());
+ }
+ long checksumAfterCorruption;
+ long actualChecksumAfterCorruption;
+ try (ChecksumIndexInput input = dir.openChecksumInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
+ assertThat(input.getFilePointer(), is(0L));
+ input.seek(input.length() - 8); // one long is the checksum... 8 bytes
+ checksumAfterCorruption = input.getChecksum();
+ actualChecksumAfterCorruption = input.readLong();
+ }
+ // we need to use assumptions (not assertions) that the checksums actually don't match: there is a small
+ // chance of a checksum collision, which is OK though....
+ StringBuilder msg = new StringBuilder();
+ msg.append("Checksum before: [").append(checksumBeforeCorruption).append("]");
+ msg.append(" after: [").append(checksumAfterCorruption).append("]");
+ msg.append(" checksum value after corruption: ").append(actualChecksumAfterCorruption).append("]");
+ msg.append(" file: ").append(fileToCorrupt.getFileName()).append(" length: ").append(dir.fileLength(fileToCorrupt.getFileName().toString()));
+ logger.info(msg.toString());
+ assumeTrue("Checksum collision - " + msg.toString(),
+ checksumAfterCorruption != checksumBeforeCorruption // collision
+ || actualChecksumAfterCorruption != checksumBeforeCorruption); // checksum corrupted
+ assertThat("no file corrupted", fileToCorrupt, notNullValue());
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java b/test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java
new file mode 100644
index 0000000000..078803a812
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.env.ShardLock;
+import org.elasticsearch.index.shard.ShardId;
+
+/**
+ * A {@link ShardLock} that does nothing... for tests only.
+ */
+public class DummyShardLock extends ShardLock {
+
+ public DummyShardLock(ShardId id) {
+ super(id);
+ }
+
+ @Override
+ protected void closeInternal() {
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
new file mode 100644
index 0000000000..e6a25a3956
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
@@ -0,0 +1,243 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.gateway.AsyncShardFetch;
+import org.elasticsearch.gateway.GatewayAllocator;
+import org.elasticsearch.gateway.ReplicaShardAllocator;
+import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
+import org.elasticsearch.node.settings.NodeSettingsService;
+import org.elasticsearch.test.gateway.NoopGatewayAllocator;
+
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+
+/**
+ * Base class for unit tests of shard allocation. It provides factory methods for creating
+ * {@link MockAllocationService} instances backed by randomized {@link AllocationDecider}s, as well as
+ * helpers for creating test nodes and canned allocation deciders.
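+ * <p>
+ * A minimal sketch of typical use (names are from this class):
+ * <pre>
+ * AllocationService strategy = createAllocationService(Settings.EMPTY);
+ * clusterState = startRandomInitializingShard(clusterState, strategy);
+ * </pre>
+ */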
+public abstract class ESAllocationTestCase extends ESTestCase {
+
+ public static MockAllocationService createAllocationService() {
+ return createAllocationService(Settings.Builder.EMPTY_SETTINGS);
+ }
+
+ public static MockAllocationService createAllocationService(Settings settings) {
+ return createAllocationService(settings, getRandom());
+ }
+
+ public static MockAllocationService createAllocationService(Settings settings, Random random) {
+ return createAllocationService(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), random);
+ }
+
+ public static MockAllocationService createAllocationService(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+ return new MockAllocationService(settings,
+ randomAllocationDeciders(settings, nodeSettingsService, random),
+ new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE);
+ }
+
+ public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) {
+ return new MockAllocationService(settings,
+ randomAllocationDeciders(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()),
+ new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), clusterInfoService);
+ }
+
+ public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator allocator) {
+ return new MockAllocationService(settings,
+ randomAllocationDeciders(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()),
+ new ShardsAllocators(settings, allocator), EmptyClusterInfoService.INSTANCE);
+ }
+
+
+ public static AllocationDeciders randomAllocationDeciders(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+ final List<Class<? extends AllocationDecider>> defaultAllocationDeciders = ClusterModule.DEFAULT_ALLOCATION_DECIDERS;
+ final List<AllocationDecider> list = new ArrayList<>();
+ for (Class<? extends AllocationDecider> deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
+ try {
+ try {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, NodeSettingsService.class);
+ list.add(constructor.newInstance(settings, nodeSettingsService));
+ } catch (NoSuchMethodException e) {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class);
+ list.add(constructor.newInstance(settings));
+ }
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ assertThat(list.size(), equalTo(defaultAllocationDeciders.size()));
+ for (AllocationDecider d : list) {
+ assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
+ }
+ Randomness.shuffle(list);
+ return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+ }
+
+ public static DiscoveryNode newNode(String nodeId) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, TransportAddress address) {
+ return new DiscoveryNode(nodeId, address, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeName, String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode(nodeName, nodeId, DummyTransportAddress.INSTANCE, attributes, Version.CURRENT);
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Version version) {
+ return new DiscoveryNode(nodeId, DummyTransportAddress.INSTANCE, version);
+ }
+
+ public static ClusterState startRandomInitializingShard(ClusterState clusterState, AllocationService strategy) {
+ List<ShardRouting> initializingShards = clusterState.getRoutingNodes().shardsWithState(INITIALIZING);
+ if (initializingShards.isEmpty()) {
+ return clusterState;
+ }
+ RoutingTable routingTable = strategy.applyStartedShards(clusterState, arrayAsArrayList(initializingShards.get(randomInt(initializingShards.size() - 1)))).routingTable();
+ return ClusterState.builder(clusterState).routingTable(routingTable).build();
+ }
+
+ public static AllocationDeciders yesAllocationDeciders() {
+ return new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new TestAllocateDecision(Decision.YES)});
+ }
+
+ public static AllocationDeciders noAllocationDeciders() {
+ return new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new TestAllocateDecision(Decision.NO)});
+ }
+
+ public static AllocationDeciders throttleAllocationDeciders() {
+ return new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new TestAllocateDecision(Decision.THROTTLE)});
+ }
+
+ public static class TestAllocateDecision extends AllocationDecider {
+
+ private final Decision decision;
+
+ public TestAllocateDecision(Decision decision) {
+ super(Settings.EMPTY);
+ this.decision = decision;
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+ return decision;
+ }
+
+ @Override
+ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) {
+ return decision;
+ }
+
+ @Override
+ public Decision canAllocate(RoutingNode node, RoutingAllocation allocation) {
+ return decision;
+ }
+ }
+
+ /**
+ * A mock {@link AllocationService} allowing tests to override the time source.
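+ * <p>
+ * For example (a sketch):
+ * <pre>
+ * MockAllocationService allocation = createAllocationService(Settings.EMPTY);
+ * allocation.setNanoTimeOverride(0L); // freeze the clock for deterministic delay handling
+ * </pre>
+ */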
+ protected static class MockAllocationService extends AllocationService {
+
+ private Long nanoTimeOverride = null;
+
+ public MockAllocationService(Settings settings, AllocationDeciders allocationDeciders, ShardsAllocators shardsAllocators, ClusterInfoService clusterInfoService) {
+ super(settings, allocationDeciders, shardsAllocators, clusterInfoService);
+ }
+
+ public void setNanoTimeOverride(long nanoTime) {
+ this.nanoTimeOverride = nanoTime;
+ }
+
+ @Override
+ protected long currentNanoTime() {
+ return nanoTimeOverride == null ? super.currentNanoTime() : nanoTimeOverride;
+ }
+ }
+
+ /**
+ * Mocks behavior in {@link ReplicaShardAllocator} to remove delayed shards from the list of unassigned shards so they don't get reassigned yet.
+ */
+ protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator {
+ private final ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) {
+ @Override
+ protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation) {
+ return new AsyncShardFetch.FetchResult<>(shard.shardId(), null, Collections.<String>emptySet(), Collections.<String>emptySet());
+ }
+ };
+
+ public DelayedShardsMockGatewayAllocator() {
+ super(Settings.EMPTY, null, null);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {}
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {}
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
+ boolean changed = false;
+ while (unassignedIterator.hasNext()) {
+ ShardRouting shard = unassignedIterator.next();
+ IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndex());
+ if (shard.primary() || shard.allocatedPostIndexCreate(indexMetaData) == false) {
+ continue;
+ }
+ changed |= replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard);
+ }
+ return changed;
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
new file mode 100644
index 0000000000..49644196da
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+
+import java.io.IOException;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.is;
+
+/**
+ * Abstract base class for backwards compatibility tests. Subclasses of this class
+ * can run tests against a mixed version cluster. A subset of the nodes in the cluster
+ * are started in dedicated processes running off a full-fledged elasticsearch release.
+ * Nodes can be "upgraded" from a "backwards" node to a "new" node, where the "new" node's
+ * version corresponds to the current version.
+ * The purpose of this test class is to run tests in scenarios where clusters are in an
+ * intermediate state during a rolling upgrade, as well as in upgrade situations. The clients
+ * accessed via {@link #client()} are random clients to the nodes in the cluster and might
+ * execute requests on the "new" as well as the "old" nodes.
+ * <p>
+ * Note: this base class is still experimental and might have bugs or leave external processes running behind.
+ * </p>
+ * Backwards compatibility tests are disabled by default via {@link Backwards} annotation.
+ * The following system variables control the test execution:
+ * <ul>
+ * <li>
+ * <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY}</tt> enables / disables
+ * tests annotated with {@link Backwards} (defaults to
+ * <tt>false</tt>)
+ * </li>
+ * <li>
+ * <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}</tt>
+ * sets the version to run the external nodes from, formatted as <i>X.Y.Z</i>.
+ * The test class will try to locate a release folder <i>elasticsearch-X.Y.Z</i>
+ * within the path passed via {@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}.
+ * </li>
+ * <li>
+ * <tt>{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}</tt> the path to the
+ * elasticsearch releases to run backwards compatibility tests against.
+ * </li>
+ * </ul>
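+ * <p>
+ * For example, a run might be configured with (version and path are placeholders):
+ * <pre>
+ * -Dtests.bwc=true -Dtests.bwc.version=2.1.0 -Dtests.bwc.path=/path/to/releases
+ * </pre>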
+ *
+ */
+// the transportClientRatio is tricky here since we don't fully control the cluster nodes
+@ESBackcompatTestCase.Backwards
+@ESIntegTestCase.ClusterScope(minNumDataNodes = 0, maxNumDataNodes = 2, scope = ESIntegTestCase.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
+public abstract class ESBackcompatTestCase extends ESIntegTestCase {
+
+ /**
+ * Key used to enable or disable backwards compatibility tests
+ * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY}
+ */
+ public static final String TESTS_BACKWARDS_COMPATIBILITY = "tests.bwc";
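+ /**
+ * Key used to set the version of the elasticsearch release to run the external nodes from,
+ * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}
+ */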
+ public static final String TESTS_BACKWARDS_COMPATIBILITY_VERSION = "tests.bwc.version";
+ /**
+ * Key used to set the path to the elasticsearch releases used to run backwards compatibility tests,
+ * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
+ */
+ public static final String TESTS_BACKWARDS_COMPATIBILITY_PATH = "tests.bwc.path";
+ /**
+ * Property that allows adapting the tests' behaviour to older features/bugs based on the input version
+ */
+ private static final String TESTS_COMPATIBILITY = "tests.compatibility";
+
+ private static final Version GLOBAL_COMPATIBILITY_VERSION = Version.fromString(compatibilityVersionProperty());
+
+ private static Path backwardsCompatibilityPath() {
+ String path = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_PATH);
+ if (path == null || path.isEmpty()) {
+ throw new IllegalArgumentException("Must specify backwards test path with property " + TESTS_BACKWARDS_COMPATIBILITY_PATH);
+ }
+ String version = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ if (version == null || version.isEmpty()) {
+ throw new IllegalArgumentException("Must specify backwards test version with property " + TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ }
+ if (Version.fromString(version).before(Version.CURRENT.minimumCompatibilityVersion())) {
+ throw new IllegalArgumentException("Backcompat elasticsearch version must be same major version as current. " +
+ "backcompat: " + version + ", current: " + Version.CURRENT.toString());
+ }
+ Path file = PathUtils.get(path, "elasticsearch-" + version);
+ if (!Files.exists(file)) {
+ throw new IllegalArgumentException("Backwards tests location is missing: " + file.toAbsolutePath());
+ }
+ if (!Files.isDirectory(file)) {
+ throw new IllegalArgumentException("Backwards tests location is not a directory: " + file.toAbsolutePath());
+ }
+ return file;
+ }
+
+ /**
+ * Returns the test's compatibility version.
+ */
+ public Version compatibilityVersion() {
+ return compatibilityVersion(getClass());
+ }
+
+ private Version compatibilityVersion(Class<?> clazz) {
+ if (clazz == Object.class || clazz == ESIntegTestCase.class) {
+ return globalCompatibilityVersion();
+ }
+ CompatibilityVersion annotation = clazz.getAnnotation(CompatibilityVersion.class);
+ if (annotation != null) {
+ return Version.smallest(Version.fromId(annotation.version()), compatibilityVersion(clazz.getSuperclass()));
+ }
+ return compatibilityVersion(clazz.getSuperclass());
+ }
+
+ /**
+ * Returns a global compatibility version that is set via the
+ * {@value #TESTS_COMPATIBILITY} or {@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION} system property.
+ * If both are unset the current version is used as the global compatibility version. This
+ * compatibility version is used for static randomization. For per-suite compatibility version see
+ * {@link #compatibilityVersion()}
+ */
+ public static Version globalCompatibilityVersion() {
+ return GLOBAL_COMPATIBILITY_VERSION;
+ }
+
+ private static String compatibilityVersionProperty() {
+ final String version = System.getProperty(TESTS_COMPATIBILITY);
+ if (Strings.hasLength(version)) {
+ return version;
+ }
+ return System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ }
+
+ public CompositeTestCluster backwardsCluster() {
+ return (CompositeTestCluster) cluster();
+ }
+
+ @Override
+ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+ TestCluster cluster = super.buildTestCluster(scope, seed);
+ ExternalNode externalNode = new ExternalNode(backwardsCompatibilityPath(), randomLong(), new NodeConfigurationSource() {
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ return externalNodeSettings(nodeOrdinal);
+ }
+
+ @Override
+ public Collection<Class<? extends Plugin>> nodePlugins() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public Settings transportClientSettings() {
+ // qualify with the enclosing class; an unqualified call here would recurse infinitely
+ return ESBackcompatTestCase.this.transportClientSettings();
+ }
+ });
+ return new CompositeTestCluster((InternalTestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), externalNode);
+ }
+
+ private Settings addLoggerSettings(Settings externalNodesSettings) {
+ TestLogging logging = getClass().getAnnotation(TestLogging.class);
+ Map<String, String> loggingLevels = LoggingListener.getLoggersAndLevelsFromAnnotation(logging);
+ Settings.Builder finalSettings = Settings.settingsBuilder();
+ if (loggingLevels != null) {
+ for (Map.Entry<String, String> level : loggingLevels.entrySet()) {
+ finalSettings.put("logger." + level.getKey(), level.getValue());
+ }
+ }
+ finalSettings.put(externalNodesSettings);
+ return finalSettings.build();
+ }
+
+ protected int minExternalNodes() { return 1; }
+
+ protected int maxExternalNodes() {
+ return 2;
+ }
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ protected Settings requiredSettings() {
+ return ExternalNode.REQUIRED_SETTINGS;
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return commonNodeSettings(nodeOrdinal);
+ }
+
+ public void assertAllShardsOnNodes(String index, String pattern) {
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
+ String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+ assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
+ }
+ }
+ }
+ }
+ }
+
+ protected Settings commonNodeSettings(int nodeOrdinal) {
+ Settings.Builder builder = Settings.builder().put(requiredSettings());
+ builder.put(NetworkModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport / disco as external
+ builder.put("node.mode", "network");
+ return builder.build();
+ }
+
+ protected Settings externalNodeSettings(int nodeOrdinal) {
+ return addLoggerSettings(commonNodeSettings(nodeOrdinal));
+ }
+
+ /**
+ * Annotation for backwards compat tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = false, sysProperty = ESBackcompatTestCase.TESTS_BACKWARDS_COMPATIBILITY)
+ public @interface Backwards {
+ }
+
+ /**
+ * If a test is annotated with {@link CompatibilityVersion}
+ * all randomized settings will only contain settings or mappings which are compatible with the specified version ID.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target({ElementType.TYPE})
+ public @interface CompatibilityVersion {
+ int version();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
new file mode 100644
index 0000000000..e4fb5e755c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -0,0 +1,2129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.delete.DeleteResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.AdminClient;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.network.NetworkAddress;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.zen.ZenDiscovery;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.MockEngineFactoryPlugin;
+import org.elasticsearch.index.codec.CodecService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MappedFieldType.Loading;
+import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.MergePolicyConfig;
+import org.elasticsearch.index.shard.MergeSchedulerConfig;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogConfig;
+import org.elasticsearch.index.translog.TranslogWriter;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.cache.request.IndicesRequestCache;
+import org.elasticsearch.indices.flush.SyncedFlushService;
+import org.elasticsearch.indices.store.IndicesStore;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeMocksPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.MockSearchService;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.test.client.RandomizingClient;
+import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.store.MockFSIndexStore;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.hamcrest.Matchers;
+import org.joda.time.DateTimeZone;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.Annotation;
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BooleanSupplier;
+
+import static org.elasticsearch.client.Requests.syncedFlushRequest;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.common.util.CollectionUtils.eagerPartition;
+import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
+import static org.elasticsearch.test.XContentTestUtils.convertToMap;
+import static org.elasticsearch.test.XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.emptyIterable;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.startsWith;
+
+/**
+ * {@link ESIntegTestCase} is an abstract base class to run integration
+ * tests against a JVM-private Elasticsearch cluster. The test class supports 2 different
+ * cluster scopes.
+ * <ul>
+ * <li>{@link Scope#TEST} - uses a new cluster for each individual test method.</li>
+ * <li>{@link Scope#SUITE} - uses a cluster shared across all test methods in the same suite</li>
+ * </ul>
+ * <p>
+ * The most common test scope is {@link Scope#SUITE} which shares a cluster per test suite.
+ * <p>
+ * If the test methods need specific node settings or change persistent and/or transient cluster settings {@link Scope#TEST}
+ * should be used. To configure a scope for the test cluster the {@link ClusterScope} annotation
+ * should be used, here is an example:
+ * <pre>
+ *
+ * {@literal @}ClusterScope(scope=Scope.TEST) public class SomeIT extends ESIntegTestCase {
+ * public void testMethod() {}
+ * }
+ * </pre>
+ * <p>
+ * If no {@link ClusterScope} annotation is present on an integration test the default scope is {@link Scope#SUITE}
+ * <p>
+ * A test cluster creates a set of nodes in the background before the test starts. The number of nodes in the cluster is
+ * determined at random and can change across tests. The {@link ClusterScope} allows configuring the initial number of nodes
+ * that are created before the tests start.
+ * <pre>
+ * {@literal @}ClusterScope(scope=Scope.SUITE, numDataNodes=3)
+ * public class SomeIT extends ESIntegTestCase {
+ * public void testMethod() {}
+ * }
+ * </pre>
+ * <p>
+ * Note, the {@link ESIntegTestCase} uses randomized settings on a cluster and index level. For instance,
+ * each test might use a different directory implementation or return a random client to one of the
+ * nodes in the cluster for each call to {@link #client()}. Test failures might only be reproducible if the correct
+ * system properties are passed to the test execution environment.
+ * <p>
+ * This class supports the following system properties (passed with -Dkey=value to the application)
+ * <ul>
+ * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ratio between node and transport clients used</li>
+ * <li>-D{@value #TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
+ * useful to test the system without the asserting mock modules, to make sure they don't hide any bugs that
+ * would occur in production.</li>
+ * <li>-D{@value #SETTING_INDEX_SEED} - a random seed used to initialize the index random context.</li>
+ * </ul>
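+ * <p>
+ * For example, a run might be configured with (values are placeholders):
+ * <pre>
+ * -Dtests.client.ratio=0.5 -Dtests.enable_mock_modules=false
+ * </pre>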
+ */
+@LuceneTestCase.SuppressFileSystems("ExtrasFS") // doesn't work with potential multi data path from test cluster yet
+public abstract class ESIntegTestCase extends ESTestCase {
+
+ /**
+ * Property that controls whether ThirdParty Integration tests are run (not the default).
+ */
+ public static final String SYSPROP_THIRDPARTY = "tests.thirdparty";
+
+ /**
+ * Annotation for third-party integration tests.
+ * <p>
+ * These are tests that require a third-party service in order to run. They
+ * may require the user to manually configure an external process (such as rabbitmq),
+ * or may additionally require some external configuration (e.g. AWS credentials)
+ * via the {@code tests.config} system property.
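+ * <p>
+ * For example, such a test might be run with (values are placeholders):
+ * <pre>
+ * -Dtests.thirdparty=true -Dtests.config=/path/to/config
+ * </pre>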
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = false, sysProperty = ESIntegTestCase.SYSPROP_THIRDPARTY)
+ public @interface ThirdParty {
+ }
+
+ /** node names of the corresponding clusters will start with these prefixes */
+ public static final String SUITE_CLUSTER_NODE_PREFIX = "node_s";
+ public static final String TEST_CLUSTER_NODE_PREFIX = "node_t";
+
+ /**
+ * Key used to set the transport client ratio via the commandline -D{@value #TESTS_CLIENT_RATIO}
+ */
+ public static final String TESTS_CLIENT_RATIO = "tests.client.ratio";
+
+ /**
+ * Key used to eventually switch to using an external cluster and provide its transport addresses
+ */
+ public static final String TESTS_CLUSTER = "tests.cluster";
+
+ /**
+ * Key used to retrieve the index random seed from the index settings on a running node.
+ * The value of this seed can be used to initialize a random context for a specific index.
+ * It's set once per test via a generic index template.
+ */
+ public static final String SETTING_INDEX_SEED = "index.tests.seed";
+
+ /**
+ * A boolean value to enable or disable mock modules. This is useful to test the
+ * system without the asserting mock modules, to make sure they don't hide any bugs
+ * that would occur in production.
+ *
+ * @see ESIntegTestCase
+ */
+ public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
+
+ /**
+ * Threshold at which indexing switches from frequently async to frequently bulk.
+ */
+ private static final int FREQUENT_BULK_THRESHOLD = 300;
+
+ /**
+ * Threshold at which bulk indexing will always be used.
+ */
+ private static final int ALWAYS_BULK_THRESHOLD = 3000;
+
+ /**
+ * Maximum number of async operations that indexRandom will kick off at one time.
+ */
+ private static final int MAX_IN_FLIGHT_ASYNC_INDEXES = 150;
+
+ /**
+ * Maximum number of documents in a single bulk index request.
+ */
+ private static final int MAX_BULK_INDEX_REQUEST_SIZE = 1000;
+
+ /**
+ * Default minimum number of shards for an index
+ */
+ protected static final int DEFAULT_MIN_NUM_SHARDS = 1;
+
+ /**
+ * Default maximum number of shards for an index
+ */
+ protected static final int DEFAULT_MAX_NUM_SHARDS = 10;
+
+ /**
+ * The current cluster depending on the configured {@link Scope}.
+ * By default if no {@link ClusterScope} is configured this will hold a reference to the suite cluster.
+ */
+ private static TestCluster currentCluster;
+
+ private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio();
+
+ private static final Map<Class<?>, TestCluster> clusters = new IdentityHashMap<>();
+
+ private static ESIntegTestCase INSTANCE = null; // see @SuiteScope
+ private static Long SUITE_SEED = null;
+
+ @BeforeClass
+ public static void beforeClass() throws Exception {
+ SUITE_SEED = randomLong();
+ initializeSuiteScope();
+ }
+
+ protected final void beforeInternal() throws Exception {
+ final Scope currentClusterScope = getCurrentClusterScope();
+ switch (currentClusterScope) {
+ case SUITE:
+ assert SUITE_SEED != null : "Suite seed was not initialized";
+ currentCluster = buildAndPutCluster(currentClusterScope, SUITE_SEED);
+ break;
+ case TEST:
+ currentCluster = buildAndPutCluster(currentClusterScope, randomLong());
+ break;
+ default:
+ fail("Unknown Scope: [" + currentClusterScope + "]");
+ }
+ cluster().beforeTest(getRandom(), getPerTestTransportClientRatio());
+ cluster().wipe(excludeTemplates());
+ randomIndexTemplate();
+ }
+
+ private void printTestMessage(String message) {
+ if (isSuiteScopedTest(getClass()) && (getTestName().equals("<unknown>"))) {
+ logger.info("[{}]: {} suite", getTestClass().getSimpleName(), message);
+ } else {
+ logger.info("[{}#{}]: {} test", getTestClass().getSimpleName(), getTestName(), message);
+ }
+ }
+
+ private Loading randomLoadingValues() {
+ return randomFrom(Loading.values());
+ }
+
+ /**
+ * Creates a randomized index template. This template is used to pass in randomized settings on a
+ * per-index basis. Allows enabling/disabling the randomization of the number of shards and replicas.
+ */
+ public void randomIndexTemplate() throws IOException {
+
+ // TODO move settings for random directory etc here into the index based randomized settings.
+ if (cluster().size() > 0) {
+ Settings.Builder randomSettingsBuilder =
+ setRandomIndexSettings(getRandom(), Settings.builder())
+ .put(SETTING_INDEX_SEED, getRandom().nextLong());
+
+ randomSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards())
+ .put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas());
+
+ // if the test class is annotated with SuppressCodecs("*"), it means don't use lucene's codec randomization
+ // otherwise, use it, it has assertions and so on that can find bugs.
+ SuppressCodecs annotation = getClass().getAnnotation(SuppressCodecs.class);
+ if (annotation != null && annotation.value().length == 1 && "*".equals(annotation.value()[0])) {
+ randomSettingsBuilder.put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC));
+ } else {
+ randomSettingsBuilder.put("index.codec", CodecService.LUCENE_DEFAULT_CODEC);
+ }
+ XContentBuilder mappings = null;
+ if (frequently() && randomDynamicTemplates()) {
+ mappings = XContentFactory.jsonBuilder().startObject().startObject("_default_");
+ if (randomBoolean()) {
+ mappings.startObject(TimestampFieldMapper.NAME)
+ .field("enabled", randomBoolean());
+ mappings.endObject();
+ }
+ mappings.startArray("dynamic_templates")
+ .startObject()
+ .startObject("template-strings")
+ .field("match_mapping_type", "string")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(Loading.KEY, randomLoadingValues())
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-longs")
+ .field("match_mapping_type", "long")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-doubles")
+ .field("match_mapping_type", "double")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-geo_points")
+ .field("match_mapping_type", "geo_point")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject()
+ .startObject("template-booleans")
+ .field("match_mapping_type", "boolean")
+ .startObject("mapping")
+ .startObject("fielddata")
+ .field(Loading.KEY, randomFrom(Loading.LAZY, Loading.EAGER))
+ .endObject()
+ .endObject()
+ .endObject()
+ .endObject()
+ .endArray();
+ mappings.endObject().endObject();
+ }
+
+ for (String setting : randomSettingsBuilder.internalMap().keySet()) {
+ assertThat("non index. prefix setting set on index template, its a node setting...", setting, startsWith("index."));
+ }
+
+ PutIndexTemplateRequestBuilder putTemplate = client().admin().indices()
+ .preparePutTemplate("random_index_template")
+ .setTemplate("*")
+ .setOrder(0)
+ .setSettings(randomSettingsBuilder);
+ if (mappings != null) {
+ logger.info("test using _default_ mappings: [{}]", mappings.bytes().toUtf8());
+ putTemplate.addMapping("_default_", mappings);
+ }
+ assertAcked(putTemplate.execute().actionGet());
+ }
+ }
+
+ protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builder builder) {
+ setRandomIndexMergeSettings(random, builder);
+ setRandomIndexTranslogSettings(random, builder);
+ setRandomIndexNormsLoading(random, builder);
+
+ if (random.nextBoolean()) {
+ builder.put(MergeSchedulerConfig.AUTO_THROTTLE, false);
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, random.nextBoolean());
+ }
+
+ if (random.nextBoolean()) {
+ builder.put("index.shard.check_on_startup", randomFrom(random, "false", "checksum", "true"));
+ }
+
+ if (randomBoolean()) {
+ // keep this low so we don't stall tests
+ builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, RandomInts.randomIntBetween(random, 1, 15) + "ms");
+ }
+
+ return builder;
+ }
+
+ private static Settings.Builder setRandomIndexMergeSettings(Random random, Settings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(MergePolicyConfig.INDEX_COMPOUND_FORMAT,
+ random.nextBoolean() ? random.nextDouble() : random.nextBoolean());
+ }
+ switch (random.nextInt(4)) {
+ case 3:
+ final int maxThreadCount = RandomInts.randomIntBetween(random, 1, 4);
+ final int maxMergeCount = RandomInts.randomIntBetween(random, maxThreadCount, maxThreadCount + 4);
+ builder.put(MergeSchedulerConfig.MAX_MERGE_COUNT, maxMergeCount);
+ builder.put(MergeSchedulerConfig.MAX_THREAD_COUNT, maxThreadCount);
+ break;
+ }
+
+ return builder;
+ }
+
+ private static Settings.Builder setRandomIndexNormsLoading(Random random, Settings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(SearchService.NORMS_LOADING_KEY, RandomPicks.randomFrom(random, Arrays.asList(MappedFieldType.Loading.EAGER, MappedFieldType.Loading.LAZY)));
+ }
+ return builder;
+ }
+
+ private static Settings.Builder setRandomIndexTranslogSettings(Random random, Settings.Builder builder) {
+ if (random.nextBoolean()) {
+ builder.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, RandomInts.randomIntBetween(random, 1, 10000));
+ }
+ if (random.nextBoolean()) {
+ builder.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB));
+ }
+ if (random.nextBoolean()) {
+ builder.put(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, random.nextBoolean());
+ }
+ if (random.nextBoolean()) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, RandomPicks.randomFrom(random, Translog.Durabilty.values()));
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, RandomPicks.randomFrom(random, TranslogWriter.Type.values()));
+ if (rarely(random)) {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_SYNC_INTERVAL, 0); // 0 has special meaning to sync each op
+ } else {
+ builder.put(TranslogConfig.INDEX_TRANSLOG_SYNC_INTERVAL, RandomInts.randomIntBetween(random, 100, 5000), TimeUnit.MILLISECONDS);
+ }
+ }
+
+ return builder;
+ }
+
+ private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception {
+ return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable<TestCluster>() {
+ @Override
+ public TestCluster call() throws Exception {
+ return buildTestCluster(scope, seed);
+ }
+ });
+ }
+
+ private TestCluster buildAndPutCluster(Scope currentClusterScope, long seed) throws Exception {
+ final Class<?> clazz = this.getClass();
+ TestCluster testCluster = clusters.remove(clazz); // remove this cluster first
+ clearClusters(); // all leftovers are gone by now... this is really just a double safety net in case we missed something somewhere
+ switch (currentClusterScope) {
+ case SUITE:
+ if (testCluster == null) { // only build if it's not there yet
+ testCluster = buildWithPrivateContext(currentClusterScope, seed);
+ }
+ break;
+ case TEST:
+ // close the previous one and create a new one
+ IOUtils.closeWhileHandlingException(testCluster);
+ testCluster = buildTestCluster(currentClusterScope, seed);
+ break;
+ }
+ clusters.put(clazz, testCluster);
+ return testCluster;
+ }
+
+ private static void clearClusters() throws IOException {
+ if (!clusters.isEmpty()) {
+ IOUtils.close(clusters.values());
+ clusters.clear();
+ }
+ }
+
+ protected final void afterInternal(boolean afterClass) throws Exception {
+ boolean success = false;
+ try {
+ final Scope currentClusterScope = getCurrentClusterScope();
+ clearDisruptionScheme();
+ try {
+ if (cluster() != null) {
+ if (currentClusterScope != Scope.TEST) {
+ MetaData metaData = client().admin().cluster().prepareState().execute().actionGet().getState().getMetaData();
+ assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(), metaData
+ .persistentSettings().getAsMap().size(), equalTo(0));
+ assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(), metaData
+ .transientSettings().getAsMap().size(), equalTo(0));
+ }
+ ensureClusterSizeConsistency();
+ ensureClusterStateConsistency();
+ if (isInternalCluster()) {
+ // check no pending cluster states are leaked
+ for (Discovery discovery : internalCluster().getInstances(Discovery.class)) {
+ if (discovery instanceof ZenDiscovery) {
+ final ZenDiscovery zenDiscovery = (ZenDiscovery) discovery;
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ assertThat("still having pending states: " + Strings.arrayToDelimitedString(zenDiscovery.pendingClusterStates(), "\n"),
+ zenDiscovery.pendingClusterStates(), emptyArray());
+ }
+ });
+ }
+ }
+ }
+ beforeIndexDeletion();
+ cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete
+ if (afterClass || currentClusterScope == Scope.TEST) {
+ cluster().close();
+ }
+ cluster().assertAfterTest();
+ }
+ } finally {
+ if (currentClusterScope == Scope.TEST) {
+ clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST
+ }
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ // if we failed here that means that something broke horribly so we should clear all clusters
+ // TODO: just let the exception propagate and remove this vestigial error handling
+ // afterTestRule.forceFailure();
+ }
+ }
+ }
+
+ /**
+ * @return A set of index templates that will not be removed between tests.
+ */
+ protected Set<String> excludeTemplates() {
+ return Collections.emptySet();
+ }
+
+ protected void beforeIndexDeletion() {
+ cluster().beforeIndexDeletion();
+ }
+
+ public static TestCluster cluster() {
+ return currentCluster;
+ }
+
+ public static boolean isInternalCluster() {
+ return (currentCluster instanceof InternalTestCluster);
+ }
+
+ public static InternalTestCluster internalCluster() {
+ if (!isInternalCluster()) {
+ throw new UnsupportedOperationException("current test cluster is immutable");
+ }
+ return (InternalTestCluster) currentCluster;
+ }
+
+ public ClusterService clusterService() {
+ return internalCluster().clusterService();
+ }
+
+ public static Client client() {
+ return client(null);
+ }
+
+ public static Client client(@Nullable String node) {
+ if (node != null) {
+ return internalCluster().client(node);
+ }
+ Client client = cluster().client();
+ if (frequently()) {
+ client = new RandomizingClient(client, getRandom());
+ }
+ return client;
+ }
+
+ public static Client dataNodeClient() {
+ Client client = internalCluster().dataNodeClient();
+ if (frequently()) {
+ client = new RandomizingClient(client, getRandom());
+ }
+ return client;
+ }
+
+ public static Iterable<Client> clients() {
+ return cluster();
+ }
+
+ protected int minimumNumberOfShards() {
+ return DEFAULT_MIN_NUM_SHARDS;
+ }
+
+ protected int maximumNumberOfShards() {
+ return DEFAULT_MAX_NUM_SHARDS;
+ }
+
+ protected int numberOfShards() {
+ return between(minimumNumberOfShards(), maximumNumberOfShards());
+ }
+
+ protected int minimumNumberOfReplicas() {
+ return 0;
+ }
+
+ protected int maximumNumberOfReplicas() {
+ // frequently use 0 or 1 replicas; rarely use a higher number when enough data nodes are available
+ int maxNumReplicas = Math.max(0, cluster().numDataNodes() - 1);
+ return frequently() ? Math.min(1, maxNumReplicas) : maxNumReplicas;
+ }
+
+ protected int numberOfReplicas() {
+ return between(minimumNumberOfReplicas(), maximumNumberOfReplicas());
+ }
+
+
+ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
+ internalCluster().setDisruptionScheme(scheme);
+ }
+
+ public void clearDisruptionScheme() {
+ if (isInternalCluster()) {
+ internalCluster().clearDisruptionScheme();
+ }
+ }
+
+ /**
+ * Returns a settings object used in {@link #createIndex(String...)} and {@link #prepareCreate(String)} and friends.
+ * This method can be overridden by subclasses to set defaults for the indices that are created by the test.
+ * By default it returns a settings object that sets a random number of shards. Number of shards and replicas
+ * can be controlled through specific methods.
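+ * <p>
+ * For example, a subclass that always wants a single shard and no replicas might override it like
+ * this (an illustrative sketch, not a prescribed pattern):
+ * <pre>
+ * &#64;Override
+ * public Settings indexSettings() {
+ *     return Settings.builder()
+ *             .put(super.indexSettings())
+ *             .put(SETTING_NUMBER_OF_SHARDS, 1)
+ *             .put(SETTING_NUMBER_OF_REPLICAS, 0)
+ *             .build();
+ * }
+ * </pre>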
+ */
+ public Settings indexSettings() {
+ Settings.Builder builder = Settings.builder();
+ int numberOfShards = numberOfShards();
+ if (numberOfShards > 0) {
+ builder.put(SETTING_NUMBER_OF_SHARDS, numberOfShards);
+ }
+ int numberOfReplicas = numberOfReplicas();
+ if (numberOfReplicas >= 0) {
+ builder.put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas);
+ }
+ // 30% of the time
+ if (randomInt(9) < 3) {
+ final String dataPath = randomAsciiOfLength(10);
+ logger.info("using custom data_path for index: [{}]", dataPath);
+ builder.put(IndexMetaData.SETTING_DATA_PATH, dataPath);
+ }
+ return builder.build();
+ }
+
+ /**
+ * Creates one or more indices and asserts that the indices are acknowledged. If one of the indices
+ * already exists this method will fail and wipe all the indices created so far.
+ */
+ public final void createIndex(String... names) {
+
+ List<String> created = new ArrayList<>();
+ for (String name : names) {
+ boolean success = false;
+ try {
+ assertAcked(prepareCreate(name));
+ created.add(name);
+ success = true;
+ } finally {
+ if (!success && !created.isEmpty()) {
+ cluster().wipeIndices(created.toArray(new String[created.size()]));
+ }
+ }
+ }
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index) {
+ return client().admin().indices().prepareCreate(index).setSettings(indexSettings());
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
+ */
+ public final CreateIndexRequestBuilder prepareCreate(String index, int numNodes) {
+ return prepareCreate(index, numNodes, Settings.builder());
+ }
+
+ /**
+ * Creates a new {@link CreateIndexRequestBuilder} with the settings obtained from {@link #indexSettings()}.
+ * The index that is created with this builder will only be allowed to allocate on the number of nodes passed to this
+ * method.
+ * <p>
+ * This method uses allocation deciders to filter out certain nodes to allocate the created index on. It defines allocation
+ * rules based on <code>index.routing.allocation.exclude._name</code>.
+ * </p>
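+ * For example (index name and settings are illustrative):
+ * <pre>
+ * assertAcked(prepareCreate("test", 2, Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)));
+ * </pre>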
+ */
+ public CreateIndexRequestBuilder prepareCreate(String index, int numNodes, Settings.Builder settingsBuilder) {
+ internalCluster().ensureAtLeastNumDataNodes(numNodes);
+
+ Settings.Builder builder = Settings.builder().put(indexSettings()).put(settingsBuilder.build());
+
+ if (numNodes > 0) {
+ getExcludeSettings(index, numNodes, builder);
+ }
+ return client().admin().indices().prepareCreate(index).setSettings(builder.build());
+ }
+
+ private Settings.Builder getExcludeSettings(String index, int num, Settings.Builder builder) {
+ String exclude = String.join(",", internalCluster().allDataNodesButN(num));
+ builder.put("index.routing.allocation.exclude._name", exclude);
+ return builder;
+ }
+
+ /**
+ * Waits until all nodes have no pending tasks.
+ */
+ public void waitNoPendingTasksOnAll() throws Exception {
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get());
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ for (Client client : clients()) {
+ ClusterHealthResponse clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get();
+ assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0));
+ PendingClusterTasksResponse pendingTasks = client.admin().cluster().preparePendingClusterTasks().setLocal(true).get();
+ assertThat("client " + client + " still has pending tasks " + pendingTasks.prettyPrint(), pendingTasks, Matchers.emptyIterable());
+ clusterHealth = client.admin().cluster().prepareHealth().setLocal(true).get();
+ assertThat("client " + client + " still has in flight fetch", clusterHealth.getNumberOfInFlightFetch(), equalTo(0));
+ }
+ }
+ });
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get());
+ }
+
+ /**
+ * Waits until the given (pattern) field name mappings concretely exist on all nodes. Note, this only checks
+ * the currently started shards and their concrete mappings.
+ */
+ public void assertConcreteMappingsOnAll(final String index, final String type, final String... fieldNames) throws Exception {
+ Set<String> nodes = internalCluster().nodesInclude(index);
+ assertThat(nodes, Matchers.not(Matchers.emptyIterable()));
+ for (String node : nodes) {
+ IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
+ IndexService indexService = indicesService.indexService(index);
+ assertThat("index service doesn't exists on " + node, indexService, notNullValue());
+ DocumentMapper documentMapper = indexService.mapperService().documentMapper(type);
+ assertThat("document mapper doesn't exists on " + node, documentMapper, notNullValue());
+ for (String fieldName : fieldNames) {
+ Collection<String> matches = documentMapper.mappers().simpleMatchToFullName(fieldName);
+ assertThat("field " + fieldName + " doesn't exists on " + node, matches, Matchers.not(emptyIterable()));
+ }
+ }
+ assertMappingOnMaster(index, type, fieldNames);
+ }
+
+ /**
+ * Waits for the given mapping type to exist on the master node.
+ */
+ public void assertMappingOnMaster(final String index, final String type, final String... fieldNames) throws Exception {
+ GetMappingsResponse response = client().admin().indices().prepareGetMappings(index).setTypes(type).get();
+ ImmutableOpenMap<String, MappingMetaData> mappings = response.getMappings().get(index);
+ assertThat(mappings, notNullValue());
+ MappingMetaData mappingMetaData = mappings.get(type);
+ assertThat(mappingMetaData, notNullValue());
+
+ Map<String, Object> mappingSource = mappingMetaData.getSourceAsMap();
+ assertFalse(mappingSource.isEmpty());
+ assertTrue(mappingSource.containsKey("properties"));
+
+ for (String fieldName : fieldNames) {
+ Map<String, Object> mappingProperties = (Map<String, Object>) mappingSource.get("properties");
+ if (fieldName.indexOf('.') != -1) {
+ fieldName = fieldName.replace(".", ".properties.");
+ }
+ assertThat("field " + fieldName + " doesn't exists in mapping " + mappingMetaData.source().string(), XContentMapValues.extractValue(fieldName, mappingProperties), notNullValue());
+ }
+ }
+
+ /** Ensures the result counts are as expected, and logs the results if different */
+ public void assertResultsAndLogOnFailure(long expectedResults, SearchResponse searchResponse) {
+ if (searchResponse.getHits().getTotalHits() != expectedResults) {
+ StringBuilder sb = new StringBuilder("search result contains [");
+ sb.append(searchResponse.getHits().getTotalHits()).append("] results. expected [").append(expectedResults).append("]");
+ String failMsg = sb.toString();
+ for (SearchHit hit : searchResponse.getHits().getHits()) {
+ sb.append("\n-> _index: [").append(hit.getIndex()).append("] type [").append(hit.getType())
+ .append("] id [").append(hit.id()).append("]");
+ }
+ logger.warn(sb.toString());
+ fail(failMsg);
+ }
+ }
+
+ /**
+ * Restricts the given index to be allocated on <code>n</code> nodes using the allocation deciders.
+ * However, if the shards can't be allocated on any other node, shards for this index will remain allocated on
+ * more than <code>n</code> nodes.
+ */
+ public void allowNodes(String index, int n) {
+ assert index != null;
+ internalCluster().ensureAtLeastNumDataNodes(n);
+ Settings.Builder builder = Settings.builder();
+ if (n > 0) {
+ getExcludeSettings(index, n, builder);
+ }
+ Settings build = builder.build();
+ if (!build.getAsMap().isEmpty()) {
+ logger.debug("allowNodes: updating [{}]'s setting to [{}]", index, build.toDelimitedString(';'));
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all actions on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ */
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ return ensureGreen(TimeValue.timeValueSeconds(30), indices);
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all actions on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
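+ * <p>
+ * For example, {@code ensureGreen(TimeValue.timeValueSeconds(10), "test")} waits up to ten seconds for
+ * the illustrative index "test" to reach a green state.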
+ *
+ * @param timeout timeout value to set on {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest}
+ */
+ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ fail("timed out waiting for green state");
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ logger.debug("indices {} are green", indices.length == 0 ? "[_all]" : indices);
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Waits for all relocating shards to become active using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation() {
+ return waitForRelocation(null);
+ }
+
+ /**
+ * Waits for all relocating shards to become active and the cluster has reached the given health status
+ * using the cluster health API.
+ */
+ public ClusterHealthStatus waitForRelocation(ClusterHealthStatus status) {
+ ClusterHealthRequest request = Requests.clusterHealthRequest().waitForRelocatingShards(0);
+ if (status != null) {
+ request.waitForStatus(status);
+ }
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(request).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("waitForRelocation timed out (status={}), cluster state:\n{}\n{}", status, client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
+ }
+ if (status != null) {
+ assertThat(actionGet.getStatus(), equalTo(status));
+ }
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Waits until at least the given number of documents are visible to searchers
+ *
+ * @param numDocs number of documents to wait for.
+ * @return the actual number of docs seen.
+ */
+ public long waitForDocs(final long numDocs) throws InterruptedException {
+ return waitForDocs(numDocs, null);
+ }
+
+ /**
+ * Waits until at least the given number of documents are visible to searchers
+ *
+ * @param numDocs number of documents to wait for
+ * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied, it will first be checked for documents indexed.
+ * This saves on unneeded searches.
+ * @return the actual number of docs seen.
+ */
+ public long waitForDocs(final long numDocs, final @Nullable BackgroundIndexer indexer) throws InterruptedException {
+ // indexing threads can wait for up to ~1m before retrying when they first try to index into a shard which is not STARTED.
+ return waitForDocs(numDocs, 90, TimeUnit.SECONDS, indexer);
+ }
+
+ /**
+ * Waits until at least the given number of documents are visible to searchers
+ *
+ * @param numDocs number of documents to wait for
+ * @param maxWaitTime if no progress has been made during this time, the test fails
+ * @param maxWaitTimeUnit the unit in which maxWaitTime is specified
+ * @param indexer a {@link org.elasticsearch.test.BackgroundIndexer}. If supplied, it will first be checked for documents indexed.
+ * This saves on unneeded searches.
+ * @return the actual number of docs seen.
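+ * <p>
+ * A sketch of typical usage with a background indexer (names and values are illustrative):
+ * <pre>
+ * long seen = waitForDocs(1000, 5, TimeUnit.MINUTES, indexer);
+ * </pre>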
+ */
+ public long waitForDocs(final long numDocs, int maxWaitTime, TimeUnit maxWaitTimeUnit, final @Nullable BackgroundIndexer indexer)
+ throws InterruptedException {
+ final AtomicLong lastKnownCount = new AtomicLong(-1);
+ long lastStartCount = -1;
+ BooleanSupplier testDocs = () -> {
+ if (indexer != null) {
+ lastKnownCount.set(indexer.totalIndexedDocs());
+ }
+ if (lastKnownCount.get() >= numDocs) {
+ try {
+ long count = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).execute().actionGet().getHits().totalHits();
+ if (count == lastKnownCount.get()) {
+ // no progress - try to refresh for the next time
+ client().admin().indices().prepareRefresh().get();
+ }
+ lastKnownCount.set(count);
+ } catch (Throwable e) { // count now acts like search and barfs if all shards failed...
+ logger.debug("failed to executed count", e);
+ return false;
+ }
+ logger.debug("[{}] docs visible for search. waiting for [{}]", lastKnownCount.get(), numDocs);
+ } else {
+ logger.debug("[{}] docs indexed. waiting for [{}]", lastKnownCount.get(), numDocs);
+ }
+ return lastKnownCount.get() >= numDocs;
+ };
+
+ while (!awaitBusy(testDocs, maxWaitTime, maxWaitTimeUnit)) {
+ if (lastStartCount == lastKnownCount.get()) {
+ // we didn't make any progress
+ fail("failed to reach " + numDocs + "docs");
+ }
+ lastStartCount = lastKnownCount.get();
+ }
+ return lastKnownCount.get();
+ }
+
+
+ /**
+ * Sets the cluster's minimum master nodes setting and makes sure the response is acknowledged.
+ * Note: this doesn't guarantee that the new setting has taken effect, just that it has been received by all nodes.
+ */
+ public void setMinimumMasterNodes(int n) {
+ assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n))
+ .get().isAcknowledged());
+ }
+
+ /**
+ * Ensures the cluster has a yellow state via the cluster health API.
+ */
+ public ClusterHealthStatus ensureYellow(String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).waitForRelocatingShards(0).waitForYellowStatus().waitForEvents(Priority.LANGUID)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureYellow timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for yellow", actionGet.isTimedOut(), equalTo(false));
+ }
+ logger.debug("indices {} are yellow", indices.length == 0 ? "[_all]" : indices);
+ return actionGet.getStatus();
+ }
+
+ /**
+ * Prints the current cluster state as debug logging.
+ */
+ public void logClusterState() {
+ logger.debug("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ }
+
+ /**
+ * Prints the segments info for the given indices as debug logging.
+ */
+ public void logSegmentsState(String... indices) throws Exception {
+ IndicesSegmentResponse segsRsp = client().admin().indices().prepareSegments(indices).get();
+ logger.debug("segments {} state: \n{}", indices.length == 0 ? "[_all]" : indices,
+ segsRsp.toXContent(JsonXContent.contentBuilder().prettyPrint(), ToXContent.EMPTY_PARAMS).string());
+ }
+
+ /**
+ * Prints current memory stats as info logging.
+ */
+ public void logMemoryStats() {
+ logger.info("memory: {}", XContentHelper.toString(client().admin().cluster().prepareNodesStats().clear().setJvm(true).get()));
+ }
+
+ void ensureClusterSizeConsistency() {
+ if (cluster() != null) { // if static init fails the cluster can be null
+ logger.trace("Check consistency for [{}] nodes", cluster().size());
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(cluster().size())).get());
+ }
+ }
+
+ /**
+ * Verifies that all nodes that have the same version of the cluster state as the master have the same cluster state
+ */
+ protected void ensureClusterStateConsistency() throws IOException {
+ if (cluster() != null) {
+ ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState();
+ byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState);
+ // remove local node reference
+ masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null);
+ Map<String, Object> masterStateMap = convertToMap(masterClusterState);
+ int masterClusterStateSize = masterClusterState.toString().length();
+ String masterId = masterClusterState.nodes().masterNodeId();
+ for (Client client : cluster()) {
+ ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState();
+ byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState);
+ // remove local node reference
+ localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null);
+ final Map<String, Object> localStateMap = convertToMap(localClusterState);
+ final int localClusterStateSize = localClusterState.toString().length();
+ // Check that the non-master node has the same version of the cluster state as the master and
+ // that the master node matches the master (otherwise there is no requirement for the cluster state to match)
+ if (masterClusterState.version() == localClusterState.version() && masterId.equals(localClusterState.nodes().masterNodeId())) {
+ try {
+ assertEquals("clusterstate UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID());
+ // We cannot compare serialization bytes since serialization order of maps is not guaranteed
+ // but we can compare serialization sizes - they should be the same
+ assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize);
+ // Compare JSON serialization
+ assertNull("clusterstate JSON serialization does not match", differenceBetweenMapsIgnoringArrayOrder(masterStateMap, localStateMap));
+ } catch (AssertionError error) {
+ logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString());
+ throw error;
+ }
+ }
+ }
+ }
+
+ }
+
+ /**
+ * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each
+ * shard is available on the cluster.
+ */
+ protected ClusterHealthStatus ensureSearchable(String... indices) {
+ // this is just a temporary thing but it's easier to change if it is encapsulated.
+ return ensureGreen(indices);
+ }
+
+ protected void ensureStableCluster(int nodeCount) {
+ ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30));
+ }
+
+ protected void ensureStableCluster(int nodeCount, TimeValue timeValue) {
+ ensureStableCluster(nodeCount, timeValue, false, null);
+ }
+
+ protected void ensureStableCluster(int nodeCount, @Nullable String viaNode) {
+ ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30), false, viaNode);
+ }
+
+ protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean local, @Nullable String viaNode) {
+ if (viaNode == null) {
+ viaNode = randomFrom(internalCluster().getNodeNames());
+ }
+ logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue);
+ ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth()
+ .setWaitForEvents(Priority.LANGUID)
+ .setWaitForNodes(Integer.toString(nodeCount))
+ .setTimeout(timeValue)
+ .setLocal(local)
+ .setWaitForRelocatingShards(0)
+ .get();
+ if (clusterHealthResponse.isTimedOut()) {
+ ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get();
+ fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n"
+ + stateResponse.getState().prettyPrint());
+ }
+ assertThat(clusterHealthResponse.isTimedOut(), is(false));
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, XContentBuilder source) {
+ return client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareIndex(index, type).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Map<String, Object> source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * client().prepareGet(index, type, id).execute().actionGet();
+ * </pre>
+ */
+ protected final GetResponse get(String index, String type, String id) {
+ return client().prepareGet(index, type, id).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, XContentBuilder source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ */
+ protected final IndexResponse index(String index, String type, String id, Object... source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Syntactic sugar for:
+ * <pre>
+ * return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ * </pre>
+ * <p>
+ * where source is a String.
+ */
+ protected final IndexResponse index(String index, String type, String id, String source) {
+ return client().prepareIndex(index, type, id).setSource(source).execute().actionGet();
+ }
+
+ /**
+ * Waits for relocations and refreshes all indices in the cluster.
+ *
+ * @see #waitForRelocation()
+ */
+ protected final RefreshResponse refresh() {
+ waitForRelocation();
+ // TODO RANDOMIZE with flush?
+ RefreshResponse actionGet = client().admin().indices().prepareRefresh().execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Flushes and refreshes all indices in the cluster
+ */
+ protected final void flushAndRefresh(String... indices) {
+ flush(indices);
+ refresh();
+ }
+
+ /**
+ * Flush some or all indices in the cluster.
+ */
+ protected final FlushResponse flush(String... indices) {
+ waitForRelocation();
+ FlushResponse actionGet = client().admin().indices().prepareFlush(indices).setWaitIfOngoing(true).execute().actionGet();
+ for (ShardOperationFailedException failure : actionGet.getShardFailures()) {
+ assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
+ }
+ return actionGet;
+ }
+
+ /**
+ * Waits for all relocations and force merge all indices in the cluster to 1 segment.
+ */
+ protected ForceMergeResponse forceMerge() {
+ waitForRelocation();
+ ForceMergeResponse actionGet = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();
+ assertNoFailures(actionGet);
+ return actionGet;
+ }
+
+ /**
+ * Returns <code>true</code> iff the given index exists, otherwise <code>false</code>
+ */
+ protected boolean indexExists(String index) {
+ IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet();
+ return actionGet.isExists();
+ }
+
+ /**
+ * Syntactic sugar for enabling allocation for <code>indices</code>
+ */
+ protected final void enableAllocation(String... indices) {
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(
+ EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all"
+ )).get();
+ }
+
+ /**
+ * Syntactic sugar for disabling allocation for <code>indices</code>
+ */
+ protected final void disableAllocation(String... indices) {
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(Settings.builder().put(
+ EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none"
+ )).get();
+ }
+
+ /**
+ * Returns a random admin client. This client can be either a node client or a transport client pointing to any of
+ * the nodes in the cluster.
+ */
+ protected AdminClient admin() {
+ return client().admin();
+ }
+
+ /**
+ * Convenience method that forwards to {@link #indexRandom(boolean, List)}.
+ */
+ public void indexRandom(boolean forceRefresh, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, Arrays.asList(builders));
+ }
+
+ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, IndexRequestBuilder... builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, dummyDocuments, Arrays.asList(builders));
+ }
+
+
+ private static final String RANDOM_BOGUS_TYPE = "RANDOM_BOGUS_TYPE______";
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creation. Some features might have bugs when a given document is the first or the last in a
+ * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
+ * layout.
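+ * <p>
+ * Typical usage (index, type, and field names are illustrative):
+ * <pre>
+ * indexRandom(true,
+ *         client().prepareIndex("test", "type", "1").setSource("field", "value1"),
+ *         client().prepareIndex("test", "type", "2").setSource("field", "value2"));
+ * </pre>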
+ *
+ * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed. Additionally if <tt>true</tt>
+ * some empty dummy documents may be randomly inserted into the document list and deleted once all documents are indexed.
+ * This is useful to produce deleted documents on the server side.
+ * @param builders the documents to index.
+ * @see #indexRandom(boolean, boolean, java.util.List)
+ */
+ public void indexRandom(boolean forceRefresh, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, forceRefresh, builders);
+ }
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creation. Some features might have bugs when a given document is the first or the last in a
+ * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
+ * layout.
+ *
+ * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
+ * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
+ * all documents are indexed. This is useful to produce deleted documents on the server side.
+ * @param builders the documents to index.
+ */
+ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+ indexRandom(forceRefresh, dummyDocuments, true, builders);
+ }
+
+ /**
+ * Indexes the given {@link IndexRequestBuilder} instances randomly. It shuffles the given builders and either
+ * indexes them in a blocking or async fashion. This is very useful to catch problems that relate to internal document
+ * ids or index segment creation. Some features might have bugs when a given document is the first or the last in a
+ * segment or if only one document is in a segment etc. This method prevents issues like this by randomizing the index
+ * layout.
+ *
+ * @param forceRefresh if <tt>true</tt> all involved indices are refreshed once the documents are indexed.
+ * @param dummyDocuments if <tt>true</tt> some empty dummy documents may be randomly inserted into the document list and deleted once
+ * all documents are indexed. This is useful to produce deleted documents on the server side.
+ * @param maybeFlush if <tt>true</tt> this method may randomly execute full flushes after index operations.
+ * @param builders the documents to index.
+ */
+ public void indexRandom(boolean forceRefresh, boolean dummyDocuments, boolean maybeFlush, List<IndexRequestBuilder> builders) throws InterruptedException, ExecutionException {
+
+ Random random = getRandom();
+ Set<String> indicesSet = new HashSet<>();
+ for (IndexRequestBuilder builder : builders) {
+ indicesSet.add(builder.request().index());
+ }
+ Set<Tuple<String, String>> bogusIds = new HashSet<>();
+ if (random.nextBoolean() && !builders.isEmpty() && dummyDocuments) {
+ builders = new ArrayList<>(builders);
+ final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
+ // inject some bogus docs
+ final int numBogusDocs = scaledRandomIntBetween(1, builders.size() * 2);
+ final int unicodeLen = between(1, 10);
+ for (int i = 0; i < numBogusDocs; i++) {
+ String id = randomRealisticUnicodeOfLength(unicodeLen) + Integer.toString(dummyDocIdGenerator.incrementAndGet());
+ String index = RandomPicks.randomFrom(random, indices);
+ bogusIds.add(new Tuple<>(index, id));
+ builders.add(client().prepareIndex(index, RANDOM_BOGUS_TYPE, id).setSource("{}"));
+ }
+ }
+ final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
+ Collections.shuffle(builders, random());
+ final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>();
+ List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
+ // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.
+ if (builders.size() < FREQUENT_BULK_THRESHOLD ? frequently() : builders.size() < ALWAYS_BULK_THRESHOLD ? rarely() : false) {
+ if (frequently()) {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), true, false);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute(new PayloadLatchedActionListener<IndexResponse, IndexRequestBuilder>(indexRequestBuilder, newLatch(inFlightAsyncOperations), errors));
+ postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
+ }
+ } else {
+ logger.info("Index [{}] docs async: [{}] bulk: [{}]", builders.size(), false, false);
+ for (IndexRequestBuilder indexRequestBuilder : builders) {
+ indexRequestBuilder.execute().actionGet();
+ postIndexAsyncActions(indices, inFlightAsyncOperations, maybeFlush);
+ }
+ }
+ } else {
+ List<List<IndexRequestBuilder>> partition = eagerPartition(builders, Math.min(MAX_BULK_INDEX_REQUEST_SIZE,
+ Math.max(1, (int) (builders.size() * randomDouble()))));
+ logger.info("Index [{}] docs async: [{}] bulk: [{}] partitions [{}]", builders.size(), false, true, partition.size());
+ for (List<IndexRequestBuilder> segmented : partition) {
+ BulkRequestBuilder bulkBuilder = client().prepareBulk();
+ for (IndexRequestBuilder indexRequestBuilder : segmented) {
+ bulkBuilder.add(indexRequestBuilder);
+ }
+ BulkResponse actionGet = bulkBuilder.execute().actionGet();
+ assertThat(actionGet.hasFailures() ? actionGet.buildFailureMessage() : "", actionGet.hasFailures(), equalTo(false));
+ }
+ }
+ for (CountDownLatch operation : inFlightAsyncOperations) {
+ operation.await();
+ }
+ final List<Throwable> actualErrors = new ArrayList<>();
+ for (Tuple<IndexRequestBuilder, Throwable> tuple : errors) {
+ if (ExceptionsHelper.unwrapCause(tuple.v2()) instanceof EsRejectedExecutionException) {
+ tuple.v1().execute().actionGet(); // re-index if rejected
+ } else {
+ actualErrors.add(tuple.v2());
+ }
+ }
+ assertThat(actualErrors, emptyIterable());
+ if (!bogusIds.isEmpty()) {
+ // delete the bogus docs again - this might trigger merges, or at least holes in the segments, and enforces deleted docs!
+ for (Tuple<String, String> doc : bogusIds) {
+ // see https://github.com/elasticsearch/elasticsearch/issues/8706
+ final DeleteResponse deleteResponse = client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get();
+ if (deleteResponse.isFound() == false) {
+ logger.warn("failed to delete a dummy doc [{}][{}]", doc.v1(), doc.v2());
+ }
+ }
+ }
+ if (forceRefresh) {
+ assertNoFailures(client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute().get());
+ }
+ }
+
+ private AtomicInteger dummyDocIdGenerator = new AtomicInteger();
+
+ /** Disables translog flushing for the specified index */
+ public static void disableTranslogFlush(String index) {
+ Settings settings = Settings.builder().put(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, true).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Enables translog flushing for the specified index */
+ public static void enableTranslogFlush(String index) {
+ Settings settings = Settings.builder().put(IndexShard.INDEX_TRANSLOG_DISABLE_FLUSH, false).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Disables an index block for the specified index */
+ public static void disableIndexBlock(String index, String block) {
+ Settings settings = Settings.builder().put(block, false).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Enables an index block for the specified index */
+ public static void enableIndexBlock(String index, String block) {
+ Settings settings = Settings.builder().put(block, true).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
+ }
+
+ /** Sets or unsets the cluster read_only mode **/
+ public static void setClusterReadOnly(boolean value) {
+ Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
+ }
+
+ private static CountDownLatch newLatch(List<CountDownLatch> latches) {
+ CountDownLatch l = new CountDownLatch(1);
+ latches.add(l);
+ return l;
+ }
+
+ /**
+ * Maybe refresh, force merge, or flush, then always make sure there aren't too many in-flight async operations.
+ */
+ private void postIndexAsyncActions(String[] indices, List<CountDownLatch> inFlightAsyncOperations, boolean maybeFlush) throws InterruptedException {
+ if (rarely()) {
+ if (rarely()) {
+ client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ } else if (maybeFlush && rarely()) {
+ if (randomBoolean()) {
+ client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ } else {
+ client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()),
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ }
+ } else if (rarely()) {
+ client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
+ new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+ }
+ }
+ while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) {
+ int waitFor = between(0, inFlightAsyncOperations.size() - 1);
+ inFlightAsyncOperations.remove(waitFor).await();
+ }
+ }
+
+ /**
+ * The scope of a test cluster used together with
+ * {@link ESIntegTestCase.ClusterScope} annotations on {@link ESIntegTestCase} subclasses.
+ */
+ public enum Scope {
+ /**
+ * A cluster shared across all method in a single test suite
+ */
+ SUITE,
+ /**
+ * A cluster exclusive to a single test method
+ */
+ TEST
+ }
+
+ /**
+ * Defines a cluster scope for a {@link ESIntegTestCase} subclass.
+ * By default if no {@link ClusterScope} annotation is present {@link ESIntegTestCase.Scope#SUITE} is used
+ * together with randomly chosen settings like number of nodes etc.
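+ * <p>
+ * For example (class name and values are illustrative):
+ * <pre>
+ * &#64;ClusterScope(scope = Scope.TEST, numDataNodes = 2)
+ * public class MyIntegrationIT extends ESIntegTestCase { ... }
+ * </pre>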
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target({ElementType.TYPE})
+ public @interface ClusterScope {
+ /**
+ * Returns the scope. {@link ESIntegTestCase.Scope#SUITE} is default.
+ */
+ Scope scope() default Scope.SUITE;
+
+ /**
+ * Returns the number of nodes in the cluster. Default is <tt>-1</tt> which means
+ * a random number of nodes is used, where the minimum and maximum number of nodes
+ * are either the specified ones or the default ones if not specified.
+ */
+ int numDataNodes() default -1;
+
+ /**
+ * Returns the minimum number of nodes in the cluster. Default is <tt>-1</tt>.
+ * Ignored when {@link ClusterScope#numDataNodes()} is set.
+ */
+ int minNumDataNodes() default -1;
+
+ /**
+ * Returns the maximum number of nodes in the cluster. Default is <tt>-1</tt>.
+ * Ignored when {@link ClusterScope#numDataNodes()} is set.
+ */
+ int maxNumDataNodes() default -1;
+
+ /**
+ * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a
+ * negative value means that the number of client nodes will be randomized.
+ */
+ int numClientNodes() default InternalTestCluster.DEFAULT_NUM_CLIENT_NODES;
+
+ /**
+ * Returns the transport client ratio. By default this returns <code>-1</code> which means a random
+ * ratio in the interval <code>[0..1]</code> is used.
+ */
+ double transportClientRatio() default -1;
+
+ /**
+ * Returns whether or not to enable dynamic templates for the mappings.
+ */
+ boolean randomDynamicTemplates() default true;
+ }
+
+ private class LatchedActionListener<Response> implements ActionListener<Response> {
+ private final CountDownLatch latch;
+
+ public LatchedActionListener(CountDownLatch latch) {
+ this.latch = latch;
+ }
+
+ @Override
+ public final void onResponse(Response response) {
+ latch.countDown();
+ }
+
+ @Override
+ public final void onFailure(Throwable t) {
+ try {
+ logger.info("Action Failed", t);
+ addError(t);
+ } finally {
+ latch.countDown();
+ }
+ }
+
+ protected void addError(Throwable t) {
+ }
+
+ }
+
+ private class PayloadLatchedActionListener<Response, T> extends LatchedActionListener<Response> {
+ private final CopyOnWriteArrayList<Tuple<T, Throwable>> errors;
+ private final T builder;
+
+ public PayloadLatchedActionListener(T builder, CountDownLatch latch, CopyOnWriteArrayList<Tuple<T, Throwable>> errors) {
+ super(latch);
+ this.errors = errors;
+ this.builder = builder;
+ }
+
+ @Override
+ protected void addError(Throwable t) {
+ errors.add(new Tuple<>(builder, t));
+ }
+
+ }
+
+ /**
+ * Clears the given scroll Ids
+ */
+ public void clearScroll(String... scrollIds) {
+ ClearScrollResponse clearResponse = client().prepareClearScroll()
+ .setScrollIds(Arrays.asList(scrollIds)).get();
+ assertThat(clearResponse.isSucceeded(), equalTo(true));
+ }
+
+ private static <A extends Annotation> A getAnnotation(Class<?> clazz, Class<A> annotationClass) {
+ if (clazz == Object.class || clazz == ESIntegTestCase.class) {
+ return null;
+ }
+ A annotation = clazz.getAnnotation(annotationClass);
+ if (annotation != null) {
+ return annotation;
+ }
+ return getAnnotation(clazz.getSuperclass(), annotationClass);
+ }
+
+
+ private Scope getCurrentClusterScope() {
+ return getCurrentClusterScope(this.getClass());
+ }
+
+ private static Scope getCurrentClusterScope(Class<?> clazz) {
+ ClusterScope annotation = getAnnotation(clazz, ClusterScope.class);
+ // if we are not annotated assume suite!
+ return annotation == null ? Scope.SUITE : annotation.scope();
+ }
+
+ private int getNumDataNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
+ return annotation == null ? -1 : annotation.numDataNodes();
+ }
+
+ private int getMinNumDataNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
+ return annotation == null || annotation.minNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes();
+ }
+
+ private int getMaxNumDataNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
+ return annotation == null || annotation.maxNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes();
+ }
+
+ private int getNumClientNodes() {
+ ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
+ return annotation == null ? InternalTestCluster.DEFAULT_NUM_CLIENT_NODES : annotation.numClientNodes();
+ }
+
+ private boolean randomDynamicTemplates() {
+ ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
+ return annotation == null || annotation.randomDynamicTemplates();
+ }
+
+ /**
+ * This method is used to obtain settings for the <tt>Nth</tt> node in the cluster.
+ * Nodes in this cluster are associated with an ordinal number such that nodes can
+ * be started with specific configurations. This method might be called multiple
+ * times with the same ordinal and is expected to return the same value for each invocation.
+ * In other words subclasses must ensure this method is idempotent.
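+ * <p>
+ * A subclass might extend the defaults like this (a sketch; the added setting key is hypothetical):
+ * <pre>
+ * &#64;Override
+ * protected Settings nodeSettings(int nodeOrdinal) {
+ *     return settingsBuilder()
+ *             .put(super.nodeSettings(nodeOrdinal))
+ *             .put("node.example_attr", "value" + nodeOrdinal) // hypothetical setting key
+ *             .build();
+ * }
+ * </pre>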
+ */
+ protected Settings nodeSettings(int nodeOrdinal) {
+ Settings.Builder builder = settingsBuilder()
+ // Default the watermarks to absurdly low to prevent the tests
+ // from failing on nodes without enough disk space
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b")
+ .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b")
+ .put("script.indexed", "on")
+ .put("script.inline", "on")
+ // wait short time for other active shards before actually deleting, default 30s not needed in tests
+ .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(1, TimeUnit.SECONDS));
+ return builder.build();
+ }
+
+ /**
+ * Returns a collection of plugins that should be loaded on each node.
+ */
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return Collections.emptyList();
+ }
+
+ /**
+ * Returns a collection of plugins that should be loaded when creating a transport client.
+ */
+ protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+ return Collections.emptyList();
+ }
+
+ /** Helper method to create list of plugins without specifying generic types. */
+ @SafeVarargs
+ @SuppressWarnings("varargs") // due to type erasure, the varargs type is non-reifiable, which casues this warning
+ protected final Collection<Class<? extends Plugin>> pluginList(Class<? extends Plugin>... plugins) {
+ return Arrays.asList(plugins);
+ }
+
+ /**
+ * This method is used to obtain additional settings for clients created by the internal cluster.
+ * These settings will be applied on the client in addition to some randomized settings defined in
+ * the cluster. These settings will also override any other settings the internal cluster might
+ * add by default.
+ */
+ protected Settings transportClientSettings() {
+ return Settings.EMPTY;
+ }
+
+ private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException {
+ String[] stringAddresses = clusterAddresses.split(",");
+ TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
+ int i = 0;
+ for (String stringAddress : stringAddresses) {
+ URL url = new URL("http://" + stringAddress);
+ InetAddress inetAddress = InetAddress.getByName(url.getHost());
+ transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
+ }
+ return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
+ }
+
+ protected Settings externalClusterClientSettings() {
+ return Settings.EMPTY;
+ }
+
+ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+ String clusterAddresses = System.getProperty(TESTS_CLUSTER);
+ if (Strings.hasLength(clusterAddresses)) {
+ if (scope == Scope.TEST) {
+ throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER);
+ }
+ return buildExternalCluster(clusterAddresses);
+ }
+
+ final String nodePrefix;
+ switch (scope) {
+ case TEST:
+ nodePrefix = TEST_CLUSTER_NODE_PREFIX;
+ break;
+ case SUITE:
+ nodePrefix = SUITE_CLUSTER_NODE_PREFIX;
+ break;
+ default:
+ throw new ElasticsearchException("Scope not supported: " + scope);
+ }
+ NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder().put(Node.HTTP_ENABLED, false).
+ put(ESIntegTestCase.this.nodeSettings(nodeOrdinal)).build();
+ }
+
+ @Override
+ public Collection<Class<? extends Plugin>> nodePlugins() {
+ return ESIntegTestCase.this.nodePlugins();
+ }
+
+ @Override
+ public Settings transportClientSettings() {
+ return ESIntegTestCase.this.transportClientSettings();
+ }
+
+ @Override
+ public Collection<Class<? extends Plugin>> transportClientPlugins() {
+ return ESIntegTestCase.this.transportClientPlugins();
+ }
+ };
+
+ int numDataNodes = getNumDataNodes();
+ int minNumDataNodes;
+ int maxNumDataNodes;
+ if (numDataNodes >= 0) {
+ minNumDataNodes = maxNumDataNodes = numDataNodes;
+ } else {
+ minNumDataNodes = getMinNumDataNodes();
+ maxNumDataNodes = getMaxNumDataNodes();
+ }
+ SuppressLocalMode noLocal = getAnnotation(this.getClass(), SuppressLocalMode.class);
+ SuppressNetworkMode noNetwork = getAnnotation(this.getClass(), SuppressNetworkMode.class);
+ String nodeMode = InternalTestCluster.configuredNodeMode();
+ if (noLocal != null && noNetwork != null) {
+ throw new IllegalStateException("Can't suppress both network and local mode");
+ } else if (noLocal != null) {
+ nodeMode = "network";
+ } else if (noNetwork != null) {
+ nodeMode = "local";
+ }
+
+ Collection<Class<? extends Plugin>> mockPlugins = getMockPlugins();
+
+ return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes,
+ InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
+ InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins);
+ }
+
+ /** Return the mock plugins the cluster should use. These may be randomly omitted based on the cluster seed. */
+ protected Collection<Class<? extends Plugin>> getMockPlugins() {
+ return pluginList(MockTransportService.TestPlugin.class,
+ MockFSIndexStore.TestPlugin.class,
+ NodeMocksPlugin.class,
+ MockEngineFactoryPlugin.class,
+ MockSearchService.TestPlugin.class,
+ AssertingLocalTransport.TestPlugin.class);
+ }
+
+ /**
+ * Returns the transport client ratio configured via the {@link #TESTS_CLIENT_RATIO} system property.
+ */
+ private static double transportClientRatio() {
+ String property = System.getProperty(TESTS_CLIENT_RATIO);
+ if (property == null || property.isEmpty()) {
+ return Double.NaN;
+ }
+ return Double.parseDouble(property);
+ }
+
+ /**
+ * Returns the transport client ratio from the class level annotation or via
+ * {@link System#getProperty(String)} if available. If both are not available this will
+ * return a random ratio in the interval <tt>[0..1]</tt>
+ */
+ protected double getPerTestTransportClientRatio() {
+ final ClusterScope annotation = getAnnotation(this.getClass(), ClusterScope.class);
+ double perTestRatio = -1;
+ if (annotation != null) {
+ perTestRatio = annotation.transportClientRatio();
+ }
+ if (perTestRatio == -1) {
+ return Double.isNaN(TRANSPORT_CLIENT_RATIO) ? randomDouble() : TRANSPORT_CLIENT_RATIO;
+ }
+ assert perTestRatio >= 0.0 && perTestRatio <= 1.0;
+ return perTestRatio;
+ }
+
+ /**
+ * Returns a random JODA Time Zone based on Java Time Zones
+ */
+ public static DateTimeZone randomDateTimeZone() {
+ DateTimeZone timeZone;
+
+ // Some Java time zones are unknown to Joda-Time, for example: Asia/Riyadh88.
+ // We need to fall back to a known time zone in that case.
+ try {
+ timeZone = DateTimeZone.forTimeZone(RandomizedTest.randomTimeZone());
+ } catch (IllegalArgumentException e) {
+ timeZone = DateTimeZone.forOffsetHours(randomIntBetween(-12, 12));
+ }
+
+ return timeZone;
+ }
+
+ /**
+ * Returns path to a random directory that can be used to create a temporary file system repo
+ */
+ public Path randomRepoPath() {
+ if (currentCluster instanceof InternalTestCluster) {
+ return randomRepoPath(((InternalTestCluster) currentCluster).getDefaultSettings());
+ } else if (currentCluster instanceof CompositeTestCluster) {
+ return randomRepoPath(((CompositeTestCluster) currentCluster).internalCluster().getDefaultSettings());
+ }
+ throw new UnsupportedOperationException("unsupported cluster type");
+ }
+
+ /**
+ * Returns path to a random directory that can be used to create a temporary file system repo
+ */
+ public static Path randomRepoPath(Settings settings) {
+ Environment environment = new Environment(settings);
+ Path[] repoFiles = environment.repoFiles();
+ assert repoFiles.length > 0;
+ Path path;
+ do {
+ path = repoFiles[0].resolve(randomAsciiOfLength(10));
+ } while (Files.exists(path));
+ return path;
+ }
+
+ protected NumShards getNumShards(String index) {
+ MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData();
+ assertThat(metaData.hasIndex(index), equalTo(true));
+ int numShards = Integer.valueOf(metaData.index(index).getSettings().get(SETTING_NUMBER_OF_SHARDS));
+ int numReplicas = Integer.valueOf(metaData.index(index).getSettings().get(SETTING_NUMBER_OF_REPLICAS));
+ return new NumShards(numShards, numReplicas);
+ }
+
+ /**
+ * Asserts that all shards are allocated on nodes matching the given node pattern.
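+ * For example, {@code assertAllShardsOnNodes("test", "node_1*")} asserts that every shard of the
+ * illustrative index "test" lives on a node whose name matches the pattern.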
+ */
+ public Set<String> assertAllShardsOnNodes(String index, String... pattern) {
+ Set<String> nodes = new HashSet<>();
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
+ String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+ nodes.add(name);
+ assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
+ }
+ }
+ }
+ }
+ return nodes;
+ }
+
+ protected static class NumShards {
+ public final int numPrimaries;
+ public final int numReplicas;
+ public final int totalNumShards;
+ public final int dataCopies;
+
+ private NumShards(int numPrimaries, int numReplicas) {
+ this.numPrimaries = numPrimaries;
+ this.numReplicas = numReplicas;
+ this.dataCopies = numReplicas + 1;
+ this.totalNumShards = numPrimaries * dataCopies;
+ }
+ }
+
+ private static boolean runTestScopeLifecycle() {
+ return INSTANCE == null;
+ }
+
+
+ @Before
+ public final void before() throws Exception {
+
+ if (runTestScopeLifecycle()) {
+ printTestMessage("setup");
+ beforeInternal();
+ }
+ printTestMessage("starting");
+ }
+
+
+ @After
+ public final void after() throws Exception {
+ printTestMessage("finished");
+ // Deleting indices is going to clear search contexts implicitly so we
+ // need to check that there are no more in-flight search contexts before
+ // we remove indices
+ super.ensureAllSearchContextsReleased();
+ if (runTestScopeLifecycle()) {
+ printTestMessage("cleaning up after");
+ afterInternal(false);
+ printTestMessage("cleaned up after");
+ }
+ }
+
+ @AfterClass
+ public static void afterClass() throws Exception {
+ if (!runTestScopeLifecycle()) {
+ try {
+ INSTANCE.printTestMessage("cleaning up after");
+ INSTANCE.afterInternal(true);
+ } finally {
+ INSTANCE = null;
+ }
+ } else {
+ clearClusters();
+ }
+ SUITE_SEED = null;
+ currentCluster = null;
+ }
+
+ private static void initializeSuiteScope() throws Exception {
+ Class<?> targetClass = getTestClass();
+ /**
+ * Note we create the test class instance via reflection, since JUnit
+ * creates a new instance per test. That is also the reason why INSTANCE
+ * is static: this entire method must be executed in a static context.
+ */
+ assert INSTANCE == null;
+ if (isSuiteScopedTest(targetClass)) {
+ // note we need to do it this way to make sure this is reproducible
+ INSTANCE = (ESIntegTestCase) targetClass.newInstance();
+ boolean success = false;
+ try {
+ INSTANCE.printTestMessage("setup");
+ INSTANCE.beforeInternal();
+ INSTANCE.setupSuiteScopeCluster();
+ success = true;
+ } finally {
+ if (!success) {
+ afterClass();
+ }
+ }
+ } else {
+ INSTANCE = null;
+ }
+ }
+
+ /**
+ * Compute a routing key that will route documents to the <code>shard</code>-th shard
+ * of the provided index.
+ */
+ protected String routingKeyForShard(String index, String type, int shard) {
+ return internalCluster().routingKeyForShard(index, type, shard, getRandom());
+ }
+
+ /**
+ * Return settings that can be used to start a node populated from the given zipped home directory.
+ */
+ protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... settings) throws IOException {
+ Path indexDir = createTempDir();
+ Path dataDir = indexDir.resolve("data");
+ try (InputStream stream = Files.newInputStream(backwardsIndex)) {
+ TestUtil.unzip(stream, indexDir);
+ }
+ assertTrue(Files.exists(dataDir));
+
+ // list clusters in the datapath, ignoring anything from extrasfs
+ final Path[] list;
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dataDir)) {
+ List<Path> dirs = new ArrayList<>();
+ for (Path p : stream) {
+ if (!p.getFileName().toString().startsWith("extra")) {
+ dirs.add(p);
+ }
+ }
+ list = dirs.toArray(new Path[0]);
+ }
+
+ if (list.length != 1) {
+ StringBuilder builder = new StringBuilder("Backwards index must contain exactly one cluster\n");
+ for (Path line : list) {
+ builder.append(line.toString()).append('\n');
+ }
+ throw new IllegalStateException(builder.toString());
+ }
+ Path src = list[0];
+ Path dest = dataDir.resolve(internalCluster().getClusterName());
+ assertTrue(Files.exists(src));
+ Files.move(src, dest);
+ assertFalse(Files.exists(src));
+ assertTrue(Files.exists(dest));
+ Settings.Builder builder = Settings.builder()
+ .put(settings)
+ .put("path.data", dataDir.toAbsolutePath());
+
+ Path configDir = indexDir.resolve("config");
+ if (Files.exists(configDir)) {
+ builder.put("path.conf", configDir.toAbsolutePath());
+ }
+ return builder.build();
+ }
+
+ protected HttpRequestBuilder httpClient() {
+ final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
+ final NodeInfo[] nodes = nodeInfos.getNodes();
+ assertTrue(nodes.length > 0);
+ TransportAddress publishAddress = randomFrom(nodes).getHttp().address().publishAddress();
+ assertEquals(1, publishAddress.uniqueAddressTypeId());
+ InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address();
+ return new HttpRequestBuilder(HttpClients.createDefault()).host(NetworkAddress.formatAddress(address.getAddress())).port(address.getPort());
+ }
+
+ /**
+ * This method is executed before the first test of this class, but only if the
+ * test is annotated with {@link SuiteScopeTestCase}.
+ *
+ * @see SuiteScopeTestCase
+ */
+ protected void setupSuiteScopeCluster() throws Exception {
+ }
+
+ private static boolean isSuiteScopedTest(Class<?> clazz) {
+ return clazz.getAnnotation(SuiteScopeTestCase.class) != null;
+ }
+
+ /**
+ * If a test is annotated with {@link SuiteScopeTestCase},
+ * the checks and modifications that are applied to the test cluster in use are only done after all tests
+ * of this class are executed. This also has the side-effect that the suite level setup method {@link #setupSuiteScopeCluster()}
+ * is executed in a separate test instance. Variables that need to be accessible across test instances must be static.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Inherited
+ @Target(ElementType.TYPE)
+ public @interface SuiteScopeTestCase {
+ }
+
+ /**
+ * If used, the test will never run in local mode.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Inherited
+ @Target(ElementType.TYPE)
+ public @interface SuppressLocalMode {
+ }
+
+ /**
+ * If used, the test will never run in network mode.
+ */
+ @Retention(RetentionPolicy.RUNTIME)
+ @Inherited
+ @Target(ElementType.TYPE)
+ public @interface SuppressNetworkMode {
+ }
+
+}
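
A rough usage sketch of the suite-scope machinery above (the test class and index name are hypothetical; createIndex, ensureGreen and client() are the ESIntegTestCase helpers defined in this file): the cluster is initialized once in setupSuiteScopeCluster() and shared by every test of the class.

    @ESIntegTestCase.SuiteScopeTestCase
    public class SharedClusterExampleIT extends ESIntegTestCase {

        @Override
        protected void setupSuiteScopeCluster() throws Exception {
            // runs once, in a throwaway instance, before the first test of the class;
            // state that tests need to see afterwards must live in static fields
            createIndex("shared-index");
            ensureGreen("shared-index");
        }

        public void testSharedIndexExists() {
            // the cluster is only checked and wiped after the last test of the class
            assertTrue(client().admin().indices().prepareExists("shared-index").get().isExists());
        }
    }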
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
new file mode 100644
index 0000000000..287bd121c9
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+/**
+ * A test case that keeps a singleton node started for all tests, which can be used to get
+ * references to Guice injectors in unit tests.
+ */
+public abstract class ESSingleNodeTestCase extends ESTestCase {
+
+ private static Node NODE = null;
+
+ private static void reset() {
+ assert NODE != null;
+ stopNode();
+ startNode();
+ }
+
+ private static void startNode() {
+ assert NODE == null;
+ NODE = newNode();
+ // we must wait for the node to actually be up and running; otherwise the node might have started and elected itself master, but not yet have removed the
+ // SERVICE_UNAVAILABLE/1/state not recovered / initialized block
+ ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get();
+ assertFalse(clusterHealthResponse.isTimedOut());
+ }
+
+ private static void stopNode() {
+ Node node = NODE;
+ NODE = null;
+ Releasables.close(node);
+ }
+
+ static void cleanup(boolean resetNode) {
+ assertAcked(client().admin().indices().prepareDelete("*").get());
+ if (resetNode) {
+ reset();
+ }
+ MetaData metaData = client().admin().cluster().prepareState().get().getState().getMetaData();
+ assertThat("test leaves persistent cluster metadata behind: " + metaData.persistentSettings().getAsMap(),
+ metaData.persistentSettings().getAsMap().size(), equalTo(0));
+ assertThat("test leaves transient cluster metadata behind: " + metaData.transientSettings().getAsMap(),
+ metaData.transientSettings().getAsMap().size(), equalTo(0));
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ logger.info("[{}#{}]: cleaning up after test", getTestClass().getSimpleName(), getTestName());
+ super.tearDown();
+ cleanup(resetNodeAfterTest());
+ }
+
+ @BeforeClass
+ public static void setUpClass() throws Exception {
+ stopNode();
+ startNode();
+ }
+
+ @AfterClass
+ public static void tearDownClass() {
+ stopNode();
+ }
+
+ /**
+ * This method returns <code>true</code> if the node that is used in the background should be reset
+ * after each test. This is useful if the test changes the cluster state metadata etc. The default is
+ * <code>false</code>.
+ */
+ protected boolean resetNodeAfterTest() {
+ return false;
+ }
+
+ private static Node newNode() {
+ Node build = new Node(Settings.builder()
+ .put(ClusterName.SETTING, InternalTestCluster.clusterName("single-node-cluster", randomLong()))
+ .put("path.home", createTempDir())
+ // TODO: use a consistent data path for custom paths
+ // This needs to tie into the ESIntegTestCase#indexSettings() method
+ .put("path.shared_data", createTempDir().getParent())
+ .put("node.name", nodeName())
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put("script.inline", "on")
+ .put("script.indexed", "on")
+ .put(EsExecutors.PROCESSORS, 1) // limit the number of threads created
+ .put("http.enabled", false)
+ .put("node.local", true)
+ .put("node.data", true)
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // make sure we get what we set :)
+ .build()
+ );
+ build.start();
+ assertThat(DiscoveryNode.localNode(build.settings()), is(true));
+ return build;
+ }
+
+ /**
+ * Returns a client to the single-node cluster.
+ */
+ public static Client client() {
+ return NODE.client();
+ }
+
+ /**
+ * Returns the single test node's name.
+ */
+ public static String nodeName() {
+ return "node_s_0";
+ }
+
+ /**
+ * Return a reference to the singleton node.
+ */
+ protected static Node node() {
+ return NODE;
+ }
+
+ /**
+ * Get an instance for a particular class using the injector of the singleton node.
+ */
+ protected static <T> T getInstanceFromNode(Class<T> clazz) {
+ return NODE.injector().getInstance(clazz);
+ }
+
+ /**
+ * Create a new index on the singleton node with empty index settings.
+ */
+ protected static IndexService createIndex(String index) {
+ return createIndex(index, Settings.EMPTY);
+ }
+
+ /**
+ * Create a new index on the singleton node with the provided index settings.
+ */
+ protected static IndexService createIndex(String index, Settings settings) {
+ return createIndex(index, settings, null, (XContentBuilder) null);
+ }
+
+ /**
+ * Create a new index on the singleton node with the provided index settings.
+ */
+ protected static IndexService createIndex(String index, Settings settings, String type, XContentBuilder mappings) {
+ CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
+ if (type != null && mappings != null) {
+ createIndexRequestBuilder.addMapping(type, mappings);
+ }
+ return createIndex(index, createIndexRequestBuilder);
+ }
+
+ /**
+ * Create a new index on the singleton node with the provided index settings.
+ */
+ protected static IndexService createIndex(String index, Settings settings, String type, Object... mappings) {
+ CreateIndexRequestBuilder createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings);
+ if (type != null && mappings != null) {
+ createIndexRequestBuilder.addMapping(type, mappings);
+ }
+ return createIndex(index, createIndexRequestBuilder);
+ }
+
+ protected static IndexService createIndex(String index, CreateIndexRequestBuilder createIndexRequestBuilder) {
+ assertAcked(createIndexRequestBuilder.get());
+ // Wait for the index to be allocated so that cluster state updates don't override
+ // changes that would have been done locally
+ ClusterHealthResponse health = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(index).waitForYellowStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ assertThat(health.getStatus(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW));
+ assertThat("Cluster must be a single node cluster", health.getNumberOfDataNodes(), equalTo(1));
+ IndicesService instanceFromNode = getInstanceFromNode(IndicesService.class);
+ return instanceFromNode.indexServiceSafe(index);
+ }
+
+ /**
+ * Create a new search context.
+ */
+ protected static SearchContext createSearchContext(IndexService indexService) {
+ BigArrays bigArrays = indexService.getIndexServices().getBigArrays();
+ ThreadPool threadPool = indexService.getIndexServices().getThreadPool();
+ PageCacheRecycler pageCacheRecycler = node().injector().getInstance(PageCacheRecycler.class);
+ ScriptService scriptService = node().injector().getInstance(ScriptService.class);
+ return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, scriptService, indexService);
+ }
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all actions on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ */
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ return ensureGreen(TimeValue.timeValueSeconds(30), indices);
+ }
+
+
+ /**
+ * Ensures the cluster has a green state via the cluster health API. This method will also wait for relocations.
+ * It is useful to ensure that all actions on the cluster have finished and all shards that were currently relocating
+ * are now allocated and started.
+ *
+ * @param timeout time out value to set on {@link org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest}
+ */
+ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... indices) {
+ ClusterHealthResponse actionGet = client().admin().cluster()
+ .health(Requests.clusterHealthRequest(indices).timeout(timeout).waitForGreenStatus().waitForEvents(Priority.LANGUID).waitForRelocatingShards(0)).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
+ }
+ assertThat(actionGet.getStatus(), equalTo(ClusterHealthStatus.GREEN));
+ logger.debug("indices {} are green", indices.length == 0 ? "[_all]" : indices);
+ return actionGet.getStatus();
+ }
+
+
+}
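
A minimal sketch of a test built on this class (hypothetical test class; IndicesService#hasIndex is assumed available here): indices are created on the shared singleton node and services are resolved from its injector.

    public class SingleNodeExampleTests extends ESSingleNodeTestCase {

        public void testIndexIsVisibleToIndicesService() {
            IndexService indexService = createIndex("test");
            assertNotNull(indexService);
            // getInstanceFromNode resolves services from the singleton node's Guice injector
            IndicesService indicesService = getInstanceFromNode(IndicesService.class);
            assertTrue(indicesService.hasIndex("test"));
        }

        @Override
        protected boolean resetNodeAfterTest() {
            return true; // this test is assumed to dirty node state, so request a fresh node
        }
    }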
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
new file mode 100644
index 0000000000..e1443110c0
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -0,0 +1,640 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter;
+
+import org.apache.lucene.uninverting.UninvertingReader;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.TestRuleMarkFailure;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.bootstrap.BootstrapForTesting;
+import org.elasticsearch.cache.recycler.MockPageCacheRecycler;
+import org.elasticsearch.client.Requests;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.io.PathUtilsForTesting;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.search.MockSearchService;
+import org.elasticsearch.test.junit.listeners.LoggingListener;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.RuleChain;
+
+import java.io.IOException;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BooleanSupplier;
+
+import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Base testcase for randomized unit testing with Elasticsearch
+ */
+@Listeners({
+ ReproduceInfoPrinter.class,
+ LoggingListener.class
+})
+@ThreadLeakScope(Scope.SUITE)
+@ThreadLeakLingering(linger = 5000) // 5 sec lingering
+@TimeoutSuite(millis = 20 * TimeUnits.MINUTE)
+@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
+// we suppress pretty much all the lucene codecs for now, except asserting
+// assertingcodec is the winner for a codec here: it finds bugs and gives clear exceptions.
+@SuppressCodecs({
+ "SimpleText", "Memory", "CheapBastard", "Direct", "Compressing", "FST50", "FSTOrd50",
+ "TestBloomFilteredLucenePostings", "MockRandom", "BlockTreeOrds", "LuceneFixedGap",
+ "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", "Lucene50"
+})
+@LuceneTestCase.SuppressReproduceLine
+public abstract class ESTestCase extends LuceneTestCase {
+
+ static {
+ BootstrapForTesting.ensureInitialized();
+ }
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ // -----------------------------------------------------------------
+ // Suite and test case setup/cleanup.
+ // -----------------------------------------------------------------
+
+ @Rule
+ public RuleChain failureAndSuccessEvents = RuleChain.outerRule(new TestRuleAdapter() {
+ @Override
+ protected void afterIfSuccessful() throws Throwable {
+ ESTestCase.this.afterIfSuccessful();
+ }
+
+ @Override
+ protected void afterAlways(List<Throwable> errors) throws Throwable {
+ if (errors != null && errors.isEmpty() == false) {
+ ESTestCase.this.afterIfFailed(errors);
+ }
+ super.afterAlways(errors);
+ }
+ });
+
+ /** called when a test fails, supplying the errors it generated */
+ protected void afterIfFailed(List<Throwable> errors) {
+ }
+
+ /** called after a test is finished, but only if successful */
+ protected void afterIfSuccessful() throws Exception {
+ }
+
+ // set up mock filesystems for this test run. we change PathUtils
+ // so that all accesses are plumbed through any mock wrappers
+
+ @BeforeClass
+ public static void setFileSystem() throws Exception {
+ PathUtilsForTesting.setup();
+ }
+
+ @AfterClass
+ public static void restoreFileSystem() throws Exception {
+ PathUtilsForTesting.teardown();
+ }
+
+ // randomize content type for request builders
+
+ @BeforeClass
+ public static void setContentType() throws Exception {
+ Requests.CONTENT_TYPE = randomFrom(XContentType.values());
+ Requests.INDEX_CONTENT_TYPE = randomFrom(XContentType.values());
+ }
+
+ @AfterClass
+ public static void restoreContentType() {
+ Requests.CONTENT_TYPE = XContentType.SMILE;
+ Requests.INDEX_CONTENT_TYPE = XContentType.JSON;
+ }
+
+ // randomize and override the number of cpus so tests reproduce regardless of real number of cpus
+
+ @BeforeClass
+ @SuppressForbidden(reason = "sets the number of cpus during tests")
+ public static void setProcessors() {
+ int numCpu = TestUtil.nextInt(random(), 1, 4);
+ System.setProperty(EsExecutors.DEFAULT_SYSPROP, Integer.toString(numCpu));
+ assertEquals(numCpu, EsExecutors.boundedNumberOfProcessors(Settings.EMPTY));
+ }
+
+ @AfterClass
+ @SuppressForbidden(reason = "clears the number of cpus during tests")
+ public static void restoreProcessors() {
+ System.clearProperty(EsExecutors.DEFAULT_SYSPROP);
+ }
+
+ @After
+ public final void ensureCleanedUp() throws Exception {
+ MockPageCacheRecycler.ensureAllPagesAreReleased();
+ MockBigArrays.ensureAllArraysAreReleased();
+ // field cache should NEVER get loaded.
+ String[] entries = UninvertingReader.getUninvertedStats();
+ assertEquals("fieldcache must never be used, got=" + Arrays.toString(entries), 0, entries.length);
+ }
+
+ // this must be a separate method from the other ensure checks above so suite scoped integ tests can call it. TODO: fix that
+ @After
+ public final void ensureAllSearchContextsReleased() throws Exception {
+ assertBusy(new Runnable() {
+ @Override
+ public void run() {
+ MockSearchService.assertNoInFLightContext();
+ }
+ });
+ }
+
+ // mockdirectorywrappers currently set this boolean if checkindex fails
+ // TODO: can we do this cleaner???
+
+ /** MockFSDirectoryService sets this: */
+ public static boolean checkIndexFailed;
+
+ @Before
+ public final void resetCheckIndexStatus() throws Exception {
+ checkIndexFailed = false;
+ }
+
+ @After
+ public final void ensureCheckIndexPassed() throws Exception {
+ assertFalse("at least one shard failed CheckIndex", checkIndexFailed);
+ }
+
+ // -----------------------------------------------------------------
+ // Test facilities and facades for subclasses.
+ // -----------------------------------------------------------------
+
+ // TODO: replace uses of getRandom() with random()
+ // TODO: decide on one set of naming for between/scaledBetween and remove others
+ // TODO: replace frequently() with usually()
+
+ /** Shortcut for {@link RandomizedContext#getRandom()}. Use {@link #random()} instead. */
+ public static Random getRandom() {
+ // TODO: replace uses of this function with random()
+ return random();
+ }
+
+ /**
+ * Returns a "scaled" random number between min and max (inclusive).
+ *
+ * @see RandomizedTest#scaledRandomIntBetween(int, int)
+ */
+ public static int scaledRandomIntBetween(int min, int max) {
+ return RandomizedTest.scaledRandomIntBetween(min, max);
+ }
+
+ /**
+ * A random integer from <code>min</code> to <code>max</code> (inclusive).
+ *
+ * @see #scaledRandomIntBetween(int, int)
+ */
+ public static int randomIntBetween(int min, int max) {
+ return RandomInts.randomIntBetween(random(), min, max);
+ }
+
+ /**
+ * Returns a "scaled" number of iterations for loops which can have a variable
+ * iteration count. This method is effectively
+ * an alias to {@link #scaledRandomIntBetween(int, int)}.
+ */
+ public static int iterations(int min, int max) {
+ return scaledRandomIntBetween(min, max);
+ }
+
+ /**
+ * An alias for {@link #randomIntBetween(int, int)}.
+ *
+ * @see #scaledRandomIntBetween(int, int)
+ */
+ public static int between(int min, int max) {
+ return randomIntBetween(min, max);
+ }
+
+ /**
+ * The exact opposite of {@link #rarely()}.
+ */
+ public static boolean frequently() {
+ return !rarely();
+ }
+
+ public static boolean randomBoolean() {
+ return random().nextBoolean();
+ }
+
+ public static byte randomByte() {
+ return (byte) random().nextInt();
+ }
+
+ public static short randomShort() {
+ return (short) random().nextInt();
+ }
+
+ public static int randomInt() {
+ return random().nextInt();
+ }
+
+ public static float randomFloat() {
+ return random().nextFloat();
+ }
+
+ public static double randomDouble() {
+ return random().nextDouble();
+ }
+
+ /**
+ * Returns a double value in the interval [start, end) if lowerInclusive is
+ * set to true, (start, end) otherwise.
+ *
+ * @param start lower bound of interval to draw uniformly distributed random numbers from
+ * @param end upper bound
+ * @param lowerInclusive whether or not to include lower end of the interval
+ */
+ public static double randomDoubleBetween(double start, double end, boolean lowerInclusive) {
+ double result = 0.0;
+
+ if (start == -Double.MAX_VALUE || end == Double.MAX_VALUE) {
+ // formula below does not work with very large doubles
+ result = Double.longBitsToDouble(randomLong());
+ while (result < start || result > end || Double.isNaN(result)) {
+ result = Double.longBitsToDouble(randomLong());
+ }
+ } else {
+ result = randomDouble();
+ if (lowerInclusive == false) {
+ while (result <= 0.0) {
+ result = randomDouble();
+ }
+ }
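+ // linear interpolation: result in [0, 1) maps uniformly onto [start, end)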
+ result = result * end + (1.0 - result) * start;
+ }
+ return result;
+ }
+
+ public static long randomLong() {
+ return random().nextLong();
+ }
+
+ /** A random integer from 0..max (inclusive). */
+ public static int randomInt(int max) {
+ return RandomizedTest.randomInt(max);
+ }
+
+ /** Pick a random object from the given array. The array must not be empty. */
+ public static <T> T randomFrom(T... array) {
+ return RandomPicks.randomFrom(random(), array);
+ }
+
+ /** Pick a random object from the given list. */
+ public static <T> T randomFrom(List<T> list) {
+ return RandomPicks.randomFrom(random(), list);
+ }
+
+ public static String randomAsciiOfLengthBetween(int minCodeUnits, int maxCodeUnits) {
+ return RandomizedTest.randomAsciiOfLengthBetween(minCodeUnits, maxCodeUnits);
+ }
+
+ public static String randomAsciiOfLength(int codeUnits) {
+ return RandomizedTest.randomAsciiOfLength(codeUnits);
+ }
+
+ public static String randomUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) {
+ return RandomizedTest.randomUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits);
+ }
+
+ public static String randomUnicodeOfLength(int codeUnits) {
+ return RandomizedTest.randomUnicodeOfLength(codeUnits);
+ }
+
+ public static String randomUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) {
+ return RandomizedTest.randomUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints);
+ }
+
+ public static String randomUnicodeOfCodepointLength(int codePoints) {
+ return RandomizedTest.randomUnicodeOfCodepointLength(codePoints);
+ }
+
+ public static String randomRealisticUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) {
+ return RandomizedTest.randomRealisticUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits);
+ }
+
+ public static String randomRealisticUnicodeOfLength(int codeUnits) {
+ return RandomizedTest.randomRealisticUnicodeOfLength(codeUnits);
+ }
+
+ public static String randomRealisticUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) {
+ return RandomizedTest.randomRealisticUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints);
+ }
+
+ public static String randomRealisticUnicodeOfCodepointLength(int codePoints) {
+ return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints);
+ }
+
+ public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull, boolean allowEmpty) {
+ if (allowNull && random().nextBoolean()) {
+ return null;
+ }
+ int arraySize = randomIntBetween(allowEmpty ? 0 : 1, maxArraySize);
+ String[] array = new String[arraySize];
+ for (int i = 0; i < arraySize; i++) {
+ array[i] = RandomStrings.randomAsciiOfLength(random(), maxStringSize);
+ }
+ return array;
+ }
+
+ public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull) {
+ return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true);
+ }
+
+ public static String randomTimeValue() {
+ final String[] values = new String[]{"d", "H", "ms", "s", "S", "w"};
+ return randomIntBetween(0, 1000) + randomFrom(values);
+ }
+
+ /**
+ * Retries the code block for up to 10 seconds, waiting for it to run without tripping an assertion.
+ */
+ public static void assertBusy(Runnable codeBlock) throws Exception {
+ assertBusy(Executors.callable(codeBlock), 10, TimeUnit.SECONDS);
+ }
+
+ public static void assertBusy(Runnable codeBlock, long maxWaitTime, TimeUnit unit) throws Exception {
+ assertBusy(Executors.callable(codeBlock), maxWaitTime, unit);
+ }
+
+ /**
+ * Retries the code block for up to 10 seconds, waiting for it to run without tripping an assertion.
+ */
+ public static <V> V assertBusy(Callable<V> codeBlock) throws Exception {
+ return assertBusy(codeBlock, 10, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Retries the code block for the provided interval, waiting for it to run without tripping an assertion.
+ */
+ public static <V> V assertBusy(Callable<V> codeBlock, long maxWaitTime, TimeUnit unit) throws Exception {
+ long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
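+ // back off exponentially: sleep 1ms, 2ms, 4ms, ... so roughly log2(maxTimeInMillis) attempts fit in the budget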
+ long iterations = Math.max(Math.round(Math.log10(maxTimeInMillis) / Math.log10(2)), 1);
+ long timeInMillis = 1;
+ long sum = 0;
+ List<AssertionError> failures = new ArrayList<>();
+ for (int i = 0; i < iterations; i++) {
+ try {
+ return codeBlock.call();
+ } catch (AssertionError e) {
+ failures.add(e);
+ }
+ sum += timeInMillis;
+ Thread.sleep(timeInMillis);
+ timeInMillis *= 2;
+ }
+ timeInMillis = maxTimeInMillis - sum;
+ Thread.sleep(Math.max(timeInMillis, 0));
+ try {
+ return codeBlock.call();
+ } catch (AssertionError e) {
+ for (AssertionError failure : failures) {
+ e.addSuppressed(failure);
+ }
+ throw e;
+ }
+ }
+
+ public static boolean awaitBusy(BooleanSupplier breakSupplier) throws InterruptedException {
+ return awaitBusy(breakSupplier, 10, TimeUnit.SECONDS);
+ }
+
+ // After 1s, we stop growing the sleep interval exponentially and just sleep 1s until maxWaitTime
+ private static final long AWAIT_BUSY_THRESHOLD = 1000L;
+
+ public static boolean awaitBusy(BooleanSupplier breakSupplier, long maxWaitTime, TimeUnit unit) throws InterruptedException {
+ long maxTimeInMillis = TimeUnit.MILLISECONDS.convert(maxWaitTime, unit);
+ long timeInMillis = 1;
+ long sum = 0;
+ while (sum + timeInMillis < maxTimeInMillis) {
+ if (breakSupplier.getAsBoolean()) {
+ return true;
+ }
+ Thread.sleep(timeInMillis);
+ sum += timeInMillis;
+ timeInMillis = Math.min(AWAIT_BUSY_THRESHOLD, timeInMillis * 2);
+ }
+ timeInMillis = maxTimeInMillis - sum;
+ Thread.sleep(Math.max(timeInMillis, 0));
+ return breakSupplier.getAsBoolean();
+ }
+
+ public static boolean terminate(ExecutorService... services) throws InterruptedException {
+ boolean terminated = true;
+ for (ExecutorService service : services) {
+ if (service != null) {
+ terminated &= ThreadPool.terminate(service, 10, TimeUnit.SECONDS);
+ }
+ }
+ return terminated;
+ }
+
+ public static boolean terminate(ThreadPool service) throws InterruptedException {
+ return ThreadPool.terminate(service, 10, TimeUnit.SECONDS);
+ }
+
+ /**
+ * Returns a {@link java.nio.file.Path} pointing to the class path relative resource given
+ * as the first argument. In contrast to
+ * <code>getClass().getResource(...).getFile()</code> this method will not
+ * return URL encoded paths if the parent path contains spaces or other
+ * non-standard characters.
+ */
+ @Override
+ public Path getDataPath(String relativePath) {
+ // we override LTC behavior here: wrap even resources with mockfilesystems,
+ // because some code is buggy when it comes to multiple nio.2 filesystems
+ // (e.g. FileSystemUtils, and likely some tests)
+ try {
+ return PathUtils.get(getClass().getResource(relativePath).toURI());
+ } catch (Exception e) {
+ throw new RuntimeException("resource not found: " + relativePath, e);
+ }
+ }
+
+ public Path getBwcIndicesPath() {
+ return getDataPath("/indices/bwc");
+ }
+
+ /** Returns a random number of temporary paths. */
+ public String[] tmpPaths() {
+ final int numPaths = TestUtil.nextInt(random(), 1, 3);
+ final String[] absPaths = new String[numPaths];
+ for (int i = 0; i < numPaths; i++) {
+ absPaths[i] = createTempDir().toAbsolutePath().toString();
+ }
+ return absPaths;
+ }
+
+ public NodeEnvironment newNodeEnvironment() throws IOException {
+ return newNodeEnvironment(Settings.EMPTY);
+ }
+
+ public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException {
+ Settings build = Settings.builder()
+ .put(settings)
+ .put("path.home", createTempDir().toAbsolutePath())
+ .putArray("path.data", tmpPaths()).build();
+ return new NodeEnvironment(build, new Environment(build));
+ }
+
+ /** Return consistent index settings for the provided index version. */
+ public static Settings.Builder settings(Version version) {
+ Settings.Builder builder = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version);
+ return builder;
+ }
+
+ private static String threadName(Thread t) {
+ return "Thread[" +
+ "id=" + t.getId() +
+ ", name=" + t.getName() +
+ ", state=" + t.getState() +
+ ", group=" + groupName(t.getThreadGroup()) +
+ "]";
+ }
+
+ private static String groupName(ThreadGroup threadGroup) {
+ if (threadGroup == null) {
+ return "{null group}";
+ } else {
+ return threadGroup.getName();
+ }
+ }
+
+ /**
+ * Returns {@code size} randomly chosen values from the provided array.
+ */
+ public static <T> List<T> randomSubsetOf(int size, T... values) {
+ if (size > values.length) {
+ throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects");
+ }
+ List<T> list = arrayAsArrayList(values);
+ Collections.shuffle(list, random());
+ return list.subList(0, size);
+ }
+
+ /**
+ * Returns true iff assertions for elasticsearch packages are enabled
+ */
+ public static boolean assertionsEnabled() {
+ boolean enabled = false;
+ assert (enabled = true);
+ return enabled;
+ }
+
+ /**
+ * Asserts that there are no files in the specified path
+ */
+ public void assertPathHasBeenCleared(String path) throws Exception {
+ assertPathHasBeenCleared(PathUtils.get(path));
+ }
+
+ /**
+ * Asserts that there are no files in the specified path
+ */
+ public void assertPathHasBeenCleared(Path path) throws Exception {
+ logger.info("--> checking that [{}] has been cleared", path);
+ int count = 0;
+ StringBuilder sb = new StringBuilder();
+ sb.append("[");
+ if (Files.exists(path)) {
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
+ for (Path file : stream) {
+ // Skip files added by Lucene's ExtraFS
+ if (file.getFileName().toString().startsWith("extra")) {
+ continue;
+ }
+ logger.info("--> found file: [{}]", file.toAbsolutePath().toString());
+ if (Files.isDirectory(file)) {
+ assertPathHasBeenCleared(file);
+ } else if (Files.isRegularFile(file)) {
+ count++;
+ sb.append(file.toAbsolutePath().toString());
+ sb.append("\n");
+ }
+ }
+ }
+ }
+ sb.append("]");
+ assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0));
+ }
+
+ /** Returns the suite failure marker: internal use only! */
+ public static TestRuleMarkFailure getSuiteFailureMarker() {
+ return suiteFailureMarker;
+ }
+
+ /** Compares two stack traces, ignoring module (which is not yet serialized) */
+ public static void assertArrayEquals(StackTraceElement expected[], StackTraceElement actual[]) {
+ assertEquals(expected.length, actual.length);
+ for (int i = 0; i < expected.length; i++) {
+ assertEquals(expected[i], actual[i]);
+ }
+ }
+
+ /** Compares two stack trace elements, ignoring module (which is not yet serialized) */
+ public static void assertEquals(StackTraceElement expected, StackTraceElement actual) {
+ assertEquals(expected.getClassName(), actual.getClassName());
+ assertEquals(expected.getMethodName(), actual.getMethodName());
+ assertEquals(expected.getFileName(), actual.getFileName());
+ assertEquals(expected.getLineNumber(), actual.getLineNumber());
+ assertEquals(expected.isNativeMethod(), actual.isNativeMethod());
+ }
+}
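
A short sketch of the assertBusy/awaitBusy helpers above in use (hypothetical test class): assertBusy retries a block of assertions with exponential backoff, while awaitBusy polls a plain BooleanSupplier.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class BusyWaitExampleTests extends ESTestCase {

        public void testEventuallyTrue() throws Exception {
            AtomicBoolean done = new AtomicBoolean();
            Thread worker = new Thread(() -> done.set(true));
            worker.start();
            // retried until the assertion stops tripping or ~10 seconds elapse
            assertBusy(() -> assertTrue("worker has not finished yet", done.get()));
            // non-asserting variant: returns false on timeout instead of throwing
            assertTrue(awaitBusy(done::get, 5, TimeUnit.SECONDS));
            worker.join();
        }
    }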
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
new file mode 100644
index 0000000000..ed54ae60fb
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.Version;
+import org.elasticsearch.bootstrap.BootstrapForTesting;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter;
+
+@Listeners({
+ ReproduceInfoPrinter.class
+})
+@TimeoutSuite(millis = TimeUnits.HOUR)
+@LuceneTestCase.SuppressReproduceLine
+@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
+/**
+ * Basic test case for token streams. The assertion methods in this class will
+ * run basic checks to enforce correct behavior of the token streams.
+ */
+public abstract class ESTokenStreamTestCase extends BaseTokenStreamTestCase {
+
+ static {
+ BootstrapForTesting.ensureInitialized();
+ }
+
+ public static Version randomVersion() {
+ return VersionUtils.randomVersion(random());
+ }
+
+ public Settings.Builder newAnalysisSettingsBuilder() {
+ return Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
+ }
+
+}
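
For example (a hypothetical sketch using Lucene's WhitespaceAnalyzer), a token stream test asserts an analyzer's output via BaseTokenStreamTestCase#assertAnalyzesTo:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;

    public class WhitespaceAnalysisTests extends ESTokenStreamTestCase {

        public void testWhitespaceTokenization() throws Exception {
            Analyzer analyzer = new WhitespaceAnalyzer();
            // checks the produced terms and runs the usual token stream sanity checks
            assertAnalyzesTo(analyzer, "quick brown fox", new String[]{"quick", "brown", "fox"});
        }
    }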
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
new file mode 100644
index 0000000000..05f194fc26
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+
+/**
+ * Simple helper class to start external nodes to be used within a test cluster
+ */
+final class ExternalNode implements Closeable {
+
+ public static final Settings REQUIRED_SETTINGS = Settings.builder()
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "zen")
+ .put("node.mode", "network").build(); // we need network mode for this
+
+ private final Path path;
+ private final Random random;
+ private final NodeConfigurationSource nodeConfigurationSource;
+ private Process process;
+ private NodeInfo nodeInfo;
+ private final String clusterName;
+ private TransportClient client;
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+ private Settings externalNodeSettings;
+
+
+ ExternalNode(Path path, long seed, NodeConfigurationSource nodeConfigurationSource) {
+ this(path, null, seed, nodeConfigurationSource);
+ }
+
+ ExternalNode(Path path, String clusterName, long seed, NodeConfigurationSource nodeConfigurationSource) {
+ if (!Files.isDirectory(path)) {
+ throw new IllegalArgumentException("path must be a directory");
+ }
+ this.path = path;
+ this.clusterName = clusterName;
+ this.random = new Random(seed);
+ this.nodeConfigurationSource = nodeConfigurationSource;
+ }
+
+ synchronized ExternalNode start(Client localNode, Settings defaultSettings, String nodeName, String clusterName, int nodeOrdinal) throws IOException, InterruptedException {
+ ExternalNode externalNode = new ExternalNode(path, clusterName, random.nextLong(), nodeConfigurationSource);
+ Settings settings = Settings.builder().put(defaultSettings).put(nodeConfigurationSource.nodeSettings(nodeOrdinal)).build();
+ externalNode.startInternal(localNode, settings, nodeName, clusterName);
+ return externalNode;
+ }
+
+ @SuppressForbidden(reason = "needs java.io.File api to start a process")
+ synchronized void startInternal(Client client, Settings settings, String nodeName, String clusterName) throws IOException, InterruptedException {
+ if (process != null) {
+ throw new IllegalStateException("Already started");
+ }
+ List<String> params = new ArrayList<>();
+
+ if (!Constants.WINDOWS) {
+ params.add("bin/elasticsearch");
+ } else {
+ params.add("bin/elasticsearch.bat");
+ }
+ params.add("-Des.cluster.name=" + clusterName);
+ params.add("-Des.node.name=" + nodeName);
+ Settings.Builder externalNodeSettingsBuilder = Settings.builder();
+ for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+ switch (entry.getKey()) {
+ case "cluster.name":
+ case "node.name":
+ case "path.home":
+ case "node.mode":
+ case "node.local":
+ case NetworkModule.TRANSPORT_TYPE_KEY:
+ case DiscoveryModule.DISCOVERY_TYPE_KEY:
+ case NetworkModule.TRANSPORT_SERVICE_TYPE_KEY:
+ case InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING:
+ continue;
+ default:
+ externalNodeSettingsBuilder.put(entry.getKey(), entry.getValue());
+
+ }
+ }
+ this.externalNodeSettings = externalNodeSettingsBuilder.put(REQUIRED_SETTINGS).build();
+ for (Map.Entry<String, String> entry : externalNodeSettings.getAsMap().entrySet()) {
+ params.add("-Des." + entry.getKey() + "=" + entry.getValue());
+ }
+
+ params.add("-Des.path.home=" + PathUtils.get(".").toAbsolutePath());
+ params.add("-Des.path.conf=" + path + "/config");
+
+ ProcessBuilder builder = new ProcessBuilder(params);
+ builder.directory(path.toFile());
+ builder.inheritIO();
+ boolean success = false;
+ try {
+ logger.info("starting external node [{}] with: {}", nodeName, builder.command());
+ process = builder.start();
+ this.nodeInfo = null;
+ if (waitForNode(client, nodeName)) {
+ nodeInfo = nodeInfo(client, nodeName);
+ assert nodeInfo != null;
+ logger.info("external node {} found, version [{}], build {}", nodeInfo.getNode(), nodeInfo.getVersion(), nodeInfo.getBuild());
+ } else {
+ throw new IllegalStateException("Node [" + nodeName + "] didn't join the cluster");
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ stop();
+ }
+ }
+ }
+
+ static boolean waitForNode(final Client client, final String name) throws InterruptedException {
+ return ESTestCase.awaitBusy(() -> {
+ final NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().get();
+ final NodeInfo[] nodes = nodeInfos.getNodes();
+ for (NodeInfo info : nodes) {
+ if (name.equals(info.getNode().getName())) {
+ return true;
+ }
+ }
+ return false;
+ }, 30, TimeUnit.SECONDS);
+ }
+
+ static NodeInfo nodeInfo(final Client client, final String nodeName) {
+ final NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().get();
+ final NodeInfo[] nodes = nodeInfos.getNodes();
+ for (NodeInfo info : nodes) {
+ if (nodeName.equals(info.getNode().getName())) {
+ return info;
+ }
+ }
+ return null;
+ }
+
+ synchronized TransportAddress getTransportAddress() {
+ if (nodeInfo == null) {
+ throw new IllegalStateException("Node has not started yet");
+ }
+ return nodeInfo.getTransport().getAddress().publishAddress();
+ }
+
+ synchronized Client getClient() {
+ if (nodeInfo == null) {
+ throw new IllegalStateException("Node has not started yet");
+ }
+ if (client == null) {
+ TransportAddress addr = nodeInfo.getTransport().getAddress().publishAddress();
+ // verify that the resulting node settings will have network enabled.
+
+ Settings clientSettings = settingsBuilder().put(externalNodeSettings)
+ .put("client.transport.nodes_sampler_interval", "1s")
+ .put("name", "transport_client_" + nodeInfo.getNode().name())
+ .put(ClusterName.SETTING, clusterName).put("client.transport.sniff", false).build();
+ TransportClient client = TransportClient.builder().settings(clientSettings).build();
+ client.addTransportAddress(addr);
+ this.client = client;
+ }
+ return client;
+ }
+
+ synchronized void reset(long seed) {
+ this.random.setSeed(seed);
+ }
+
+ synchronized void stop() throws InterruptedException {
+ if (running()) {
+ try {
+ if (this.client != null) {
+ client.close();
+ }
+ } finally {
+ process.destroy();
+ process.waitFor();
+ process = null;
+ nodeInfo = null;
+
+ }
+ }
+ }
+
+
+ synchronized boolean running() {
+ return process != null;
+ }
+
+ @Override
+ public void close() {
+ try {
+ stop();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ synchronized String getName() {
+ if (nodeInfo == null) {
+ throw new IllegalStateException("Node has not started yet");
+ }
+ return nodeInfo.getNode().getName();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
new file mode 100644
index 0000000000..34b6bfbfb1
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.plugins.Plugin;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+/**
+ * External cluster to run the tests against.
+ * It is a purely immutable test cluster that allows sending requests to a pre-existing cluster
+ * and by nature supports all the needed test operations like wipeIndices etc.
+ */
+public final class ExternalTestCluster extends TestCluster {
+
+ private static final ESLogger logger = Loggers.getLogger(ExternalTestCluster.class);
+
+ private static final AtomicInteger counter = new AtomicInteger();
+ public static final String EXTERNAL_CLUSTER_PREFIX = "external_";
+
+ private final Client client;
+
+ private final InetSocketAddress[] httpAddresses;
+
+ private final String clusterName;
+
+ private final int numDataNodes;
+ private final int numMasterAndDataNodes;
+
+ public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection<Class<? extends Plugin>> pluginClasses, TransportAddress... transportAddresses) {
+ super(0);
+ Settings clientSettings = Settings.settingsBuilder()
+ .put(additionalSettings)
+ .put("name", InternalTestCluster.TRANSPORT_CLIENT_PREFIX + EXTERNAL_CLUSTER_PREFIX + counter.getAndIncrement())
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true) // prevents any settings from being replaced by system properties.
+ .put("client.transport.ignore_cluster_name", true)
+ .put("path.home", tempDir)
+ .put("node.mode", "network").build(); // we require network here!
+
+ TransportClient.Builder transportClientBuilder = TransportClient.builder().settings(clientSettings);
+ for (Class<? extends Plugin> pluginClass : pluginClasses) {
+ transportClientBuilder.addPlugin(pluginClass);
+ }
+ TransportClient client = transportClientBuilder.build();
+
+ try {
+ client.addTransportAddresses(transportAddresses);
+ NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get();
+ httpAddresses = new InetSocketAddress[nodeInfos.getNodes().length];
+ this.clusterName = nodeInfos.getClusterName().value();
+ int dataNodes = 0;
+ int masterAndDataNodes = 0;
+ for (int i = 0; i < nodeInfos.getNodes().length; i++) {
+ NodeInfo nodeInfo = nodeInfos.getNodes()[i];
+ httpAddresses[i] = ((InetSocketTransportAddress) nodeInfo.getHttp().address().publishAddress()).address();
+ if (DiscoveryNode.dataNode(nodeInfo.getSettings())) {
+ dataNodes++;
+ masterAndDataNodes++;
+ } else if (DiscoveryNode.masterNode(nodeInfo.getSettings())) {
+ masterAndDataNodes++;
+ }
+ }
+ this.numDataNodes = dataNodes;
+ this.numMasterAndDataNodes = masterAndDataNodes;
+ this.client = client;
+ logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size());
+ } catch (Exception e) {
+ client.close();
+ throw e;
+ }
+ }
+
+ @Override
+ public void afterTest() {
+
+ }
+
+ @Override
+ public Client client() {
+ return client;
+ }
+
+ @Override
+ public int size() {
+ return httpAddresses.length;
+ }
+
+ @Override
+ public int numDataNodes() {
+ return numDataNodes;
+ }
+
+ @Override
+ public int numDataAndMasterNodes() {
+ return numMasterAndDataNodes;
+ }
+
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ return httpAddresses;
+ }
+
+ @Override
+ public void close() throws IOException {
+ client.close();
+ }
+
+ @Override
+ public void ensureEstimatedStats() {
+ if (size() > 0) {
+ NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats()
+ .clear().setBreaker(true).setIndices(true).execute().actionGet();
+ for (NodeStats stats : nodeStats.getNodes()) {
+ assertThat("Fielddata breaker not reset to 0 on node: " + stats.getNode(),
+ stats.getBreaker().getStats(CircuitBreaker.FIELDDATA).getEstimated(), equalTo(0L));
+ // ExternalTestCluster does not check the request breaker,
+ // because checking it requires a network request, which in
+ // turn increments the breaker, making it non-0
+
+ assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+ }
+ }
+ }
+
+ @Override
+ public Iterator<Client> iterator() {
+ return Collections.singleton(client).iterator();
+ }
+
+ @Override
+ public String getClusterName() {
+ return clusterName;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java
new file mode 100644
index 0000000000..5ce620166c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldFilterLeafReader;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.LeafReader;
+
+import java.io.IOException;
+import java.util.Collections;
+
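+/**
+ * A {@link FilterDirectoryReader} that filters out a single field from every leaf reader,
+ * useful in tests that need to simulate a reader in which that field does not exist.
+ * Illustrative use: {@code DirectoryReader masked = new FieldMaskingReader("field", reader);}
+ */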
+public class FieldMaskingReader extends FilterDirectoryReader {
+ private final String field;
+ public FieldMaskingReader(String field, DirectoryReader in) throws IOException {
+ super(in, new FilterDirectoryReader.SubReaderWrapper() {
+ @Override
+ public LeafReader wrap(LeafReader reader) {
+ return new FieldFilterLeafReader(reader, Collections.singleton(field), true);
+ }
+ });
+ this.field = field;
+
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
+ return new FieldMaskingReader(field, in);
+ }
+
+ @Override
+ public Object getCoreCacheKey() {
+ return in.getCoreCacheKey();
+ }
+}
\ No newline at end of file
diff --git a/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java b/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
new file mode 100644
index 0000000000..39e1857f41
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexSettings;
+
+import java.util.Collections;
+
+public class IndexSettingsModule extends AbstractModule {
+
+ private final Index index;
+ private final Settings settings;
+
+ public IndexSettingsModule(Index index, Settings settings) {
+ this.settings = settings;
+ this.index = index;
+
+ }
+
+ @Override
+ protected void configure() {
+ bind(IndexSettings.class).toInstance(newIndexSettings(index, settings));
+ }
+
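+    /**
+     * Creates {@link IndexSettings} for tests: index version, shard and replica defaults are
+     * applied first, and the given settings override them. Illustrative use:
+     * {@code IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY);}
+     */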
+ public static IndexSettings newIndexSettings(String index, Settings settings) {
+ return newIndexSettings(new Index(index), settings);
+ }
+
+ public static IndexSettings newIndexSettings(Index index, Settings settings) {
+ Settings build = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+ .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+ .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(settings)
+ .build();
+ IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(build).build();
+ return new IndexSettings(metaData, Settings.EMPTY, Collections.emptyList());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
new file mode 100644
index 0000000000..bc6db112d7
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -0,0 +1,1873 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.carrotsearch.randomizedtesting.SysGlobals;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.lucene.store.StoreRateLimiting;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.settings.Settings.Builder;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.MockEngineFactoryPlugin;
+import org.elasticsearch.index.engine.CommitStats;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.IndexStoreConfig;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
+import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.node.MockNode;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeMocksPlugin;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
+import org.elasticsearch.node.service.NodeService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.MockSearchService;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
+import org.elasticsearch.test.store.MockFSIndexStore;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransport;
+import org.junit.Assert;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.file.Path;
+import java.util.*;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static junit.framework.Assert.fail;
+import static org.apache.lucene.util.LuceneTestCase.*;
+import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.elasticsearch.test.ESTestCase.assertBusy;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertThat;
+
+/**
+ * InternalTestCluster manages a set of JVM private nodes and allows convenient access to them.
+ * The cluster supports randomized configuration such that nodes started in the cluster will
+ * automatically load asserting services tracking resources like file handles or open searchers.
+ * <p>
+ * The Cluster is bound to a test lifecycle where tests must call {@link #beforeTest(java.util.Random, double)} and
+ * {@link #afterTest()} to initialize and reset the cluster in order to be more reproducible. The term "more" relates
+ * to the async nature of Elasticsearch in combination with randomized testing. Once threads and asynchronous calls
+ * are involved, reproducibility is very limited. This class should only be used through {@link ESIntegTestCase}.
+ * </p>
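+ * <p>
+ * A lifecycle sketch, normally driven by {@link ESIntegTestCase} rather than written by hand:
+ * <pre>
+ * cluster.beforeTest(random, 0.0); // start or reset the shared nodes
+ * Client client = cluster.client(); // client to a random node
+ * cluster.afterTest();              // wipe data directories, reset clients
+ * </pre>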
+ */
+public final class InternalTestCluster extends TestCluster {
+
+ private final ESLogger logger = Loggers.getLogger(getClass());
+
+ static NodeConfigurationSource DEFAULT_SETTINGS_SOURCE = NodeConfigurationSource.EMPTY;
+
+ /**
+ * A node level setting that holds a per node random seed that is consistent across node restarts
+ */
+ public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed";
+
+ /**
+ * The number of ports in the range used for this JVM
+ */
+ public static final int PORTS_PER_JVM = 100;
+
+ /**
+ * The number of ports in the range used for this cluster
+ */
+ public static final int PORTS_PER_CLUSTER = 20;
+
+ private static final int GLOBAL_TRANSPORT_BASE_PORT = 9300;
+ private static final int GLOBAL_HTTP_BASE_PORT = 19200;
+
+ private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0"));
+
+ /** a per-JVM unique offset to be used for calculating unique port ranges. */
+ public static final int JVM_BASE_PORT_OFFEST = PORTS_PER_JVM * (JVM_ORDINAL + 1);
+
+ private static final AtomicInteger clusterOrdinal = new AtomicInteger();
+ private final int CLUSTER_BASE_PORT_OFFSET = JVM_BASE_PORT_OFFEST + (clusterOrdinal.getAndIncrement() * PORTS_PER_CLUSTER) % PORTS_PER_JVM;
+
+ public final int TRANSPORT_BASE_PORT = GLOBAL_TRANSPORT_BASE_PORT + CLUSTER_BASE_PORT_OFFSET;
+ public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET;
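+    // Illustrative arithmetic: with JVM_ORDINAL == 0, JVM_BASE_PORT_OFFEST == 100, so the first
+    // cluster in this JVM gets CLUSTER_BASE_PORT_OFFSET == 100, i.e. transport ports 9400-9420
+    // and http ports 19300-19320; each subsequent cluster shifts both ranges by PORTS_PER_CLUSTER (20).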
+
+
+ static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
+ static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
+
+ static final int DEFAULT_NUM_CLIENT_NODES = -1;
+ static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0;
+ static final int DEFAULT_MAX_NUM_CLIENT_NODES = 1;
+
+ static final boolean DEFAULT_ENABLE_HTTP_PIPELINING = true;
+
+ /* sorted map to make traverse order reproducible, concurrent since we do checks on it not within a sync block */
+ private final NavigableMap<String, NodeAndClient> nodes = new TreeMap<>();
+
+ private final Set<Path> dataDirToClean = new HashSet<>();
+
+ private final String clusterName;
+
+ private final AtomicBoolean open = new AtomicBoolean(true);
+
+ private final Settings defaultSettings;
+
+ private AtomicInteger nextNodeId = new AtomicInteger(0);
+
+    /* Each shared node has a node seed that is used to start up the node and get default settings.
+     * This is important if a node is randomly shut down in a test, since the next test relies on a
+     * fully shared cluster to be more reproducible */
+ private final long[] sharedNodesSeeds;
+
+ private final int numSharedDataNodes;
+
+ private final int numSharedClientNodes;
+
+ private final NodeConfigurationSource nodeConfigurationSource;
+
+ private final ExecutorService executor;
+
+ private final Collection<Class<? extends Plugin>> mockPlugins;
+
+ /**
+ * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
+ */
+ private final String nodePrefix;
+ private final Path baseDir;
+
+ private ServiceDisruptionScheme activeDisruptionScheme;
+ private String nodeMode;
+
+ public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir,
+ int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
+ boolean enableHttpPipelining, String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins) {
+ super(clusterSeed);
+ if ("network".equals(nodeMode) == false && "local".equals(nodeMode) == false) {
+ throw new IllegalArgumentException("Unknown nodeMode: " + nodeMode);
+ }
+ this.nodeMode = nodeMode;
+ this.baseDir = baseDir;
+ this.clusterName = clusterName;
+ if (minNumDataNodes < 0 || maxNumDataNodes < 0) {
+ throw new IllegalArgumentException("minimum and maximum number of data nodes must be >= 0");
+ }
+
+ if (maxNumDataNodes < minNumDataNodes) {
+ throw new IllegalArgumentException("maximum number of data nodes must be >= minimum number of data nodes");
+ }
+
+ Random random = new Random(clusterSeed);
+
+ this.numSharedDataNodes = RandomInts.randomIntBetween(random, minNumDataNodes, maxNumDataNodes);
+ assert this.numSharedDataNodes >= 0;
+
+ //for now all shared data nodes are also master eligible
+ if (numSharedDataNodes == 0) {
+ this.numSharedClientNodes = 0;
+ } else {
+ if (numClientNodes < 0) {
+ this.numSharedClientNodes = RandomInts.randomIntBetween(random, DEFAULT_MIN_NUM_CLIENT_NODES, DEFAULT_MAX_NUM_CLIENT_NODES);
+ } else {
+ this.numSharedClientNodes = numClientNodes;
+ }
+ }
+ assert this.numSharedClientNodes >= 0;
+
+ this.nodePrefix = nodePrefix;
+
+ assert nodePrefix != null;
+ this.mockPlugins = mockPlugins;
+
+ /*
+ * TODO
+     * - we might want to start some master-only nodes?
+ * - we could add a flag that returns a client to the master all the time?
+ * - we could add a flag that never returns a client to the master
+ * - along those lines use a dedicated node that is master eligible and let all other nodes be only data nodes
+ */
+ sharedNodesSeeds = new long[numSharedDataNodes + numSharedClientNodes];
+ for (int i = 0; i < sharedNodesSeeds.length; i++) {
+ sharedNodesSeeds[i] = random.nextLong();
+ }
+
+ logger.info("Setup InternalTestCluster [{}] with seed [{}] using [{}] data nodes and [{}] client nodes", clusterName, SeedUtils.formatSeed(clusterSeed), numSharedDataNodes, numSharedClientNodes);
+ this.nodeConfigurationSource = nodeConfigurationSource;
+ Builder builder = Settings.settingsBuilder();
+ if (random.nextInt(5) == 0) { // sometimes set this
+ // randomize (multi/single) data path, special case for 0, don't set it at all...
+ final int numOfDataPaths = random.nextInt(5);
+ if (numOfDataPaths > 0) {
+ StringBuilder dataPath = new StringBuilder();
+ for (int i = 0; i < numOfDataPaths; i++) {
+ dataPath.append(baseDir.resolve("d" + i).toAbsolutePath()).append(',');
+ }
+ builder.put("path.data", dataPath.toString());
+ }
+ }
+ builder.put("path.shared_data", baseDir.resolve("custom"));
+ builder.put("path.home", baseDir);
+ builder.put("path.repo", baseDir.resolve("repos"));
+ builder.put("transport.tcp.port", TRANSPORT_BASE_PORT + "-" + (TRANSPORT_BASE_PORT + PORTS_PER_CLUSTER));
+ builder.put("http.port", HTTP_BASE_PORT + "-" + (HTTP_BASE_PORT + PORTS_PER_CLUSTER));
+ builder.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true);
+ builder.put("node.mode", nodeMode);
+ builder.put("http.pipelining", enableHttpPipelining);
+ if (Strings.hasLength(System.getProperty("es.logger.level"))) {
+ builder.put("logger.level", System.getProperty("es.logger.level"));
+ }
+ if (Strings.hasLength(System.getProperty("es.logger.prefix"))) {
+ builder.put("logger.prefix", System.getProperty("es.logger.prefix"));
+ }
+ // Default the watermarks to absurdly low to prevent the tests
+ // from failing on nodes without enough disk space
+ builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b");
+ builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b");
+ if (TEST_NIGHTLY) {
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 10, 15));
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 10, 15));
+ builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 5, 10));
+ } else if (random.nextInt(100) <= 90) {
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 3, 6));
+ builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 3, 6));
+ builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 2, 5));
+ }
+ // always reduce this - it can make tests really slow
+ builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50)));
+ defaultSettings = builder.build();
+ executor = EsExecutors.newCached("test runner", 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName));
+ }
+
+ public static String configuredNodeMode() {
+ Builder builder = Settings.builder();
+ if (Strings.isEmpty(System.getProperty("es.node.mode")) && Strings.isEmpty(System.getProperty("es.node.local"))) {
+ return "local"; // default if nothing is specified
+ }
+ if (Strings.hasLength(System.getProperty("es.node.mode"))) {
+ builder.put("node.mode", System.getProperty("es.node.mode"));
+ }
+ if (Strings.hasLength(System.getProperty("es.node.local"))) {
+ builder.put("node.local", System.getProperty("es.node.local"));
+ }
+ if (DiscoveryNode.localNode(builder.build())) {
+ return "local";
+ } else {
+ return "network";
+ }
+ }
+
+ @Override
+ public String getClusterName() {
+ return clusterName;
+ }
+
+ public String[] getNodeNames() {
+ return nodes.keySet().toArray(Strings.EMPTY_ARRAY);
+ }
+
+ private boolean isLocalTransportConfigured() {
+ return "local".equals(nodeMode);
+ }
+
+ private Settings getSettings(int nodeOrdinal, long nodeSeed, Settings others) {
+ Builder builder = Settings.settingsBuilder().put(defaultSettings)
+ .put(getRandomNodeSettings(nodeSeed));
+ Settings settings = nodeConfigurationSource.nodeSettings(nodeOrdinal);
+ if (settings != null) {
+ if (settings.get(ClusterName.SETTING) != null) {
+ throw new IllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]");
+ }
+ builder.put(settings);
+ }
+ if (others != null) {
+ builder.put(others);
+ }
+ builder.put(ClusterName.SETTING, clusterName);
+ return builder.build();
+ }
+
+ private Collection<Class<? extends Plugin>> getPlugins(long seed) {
+ Set<Class<? extends Plugin>> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins());
+ plugins.addAll(mockPlugins);
+ if (isLocalTransportConfigured() == false) {
+ // this is crazy we must do this here...we should really just always be using local transport...
+ plugins.remove(AssertingLocalTransport.TestPlugin.class);
+ }
+ return plugins;
+ }
+
+ private Settings getRandomNodeSettings(long seed) {
+ Random random = new Random(seed);
+ Builder builder = Settings.settingsBuilder()
+ .put(SETTING_CLUSTER_NODE_SEED, seed);
+ if (isLocalTransportConfigured() == false) {
+ builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random));
+ }
+ if (random.nextBoolean()) {
+ builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values()));
+ }
+ if (random.nextInt(10) == 0) { // 10% of the nodes have a very frequent check interval
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(10 + random.nextInt(2000)));
+ } else if (random.nextInt(10) != 0) { // 90% of the time - 10% of the time we don't set anything
+ builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
+ }
+        if (random.nextBoolean()) { // sometimes set a default keepalive
+ builder.put(SearchService.DEFAULT_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
+ }
+
+ if (random.nextInt(10) == 0) {
+ // node gets an extra cpu this time
+ builder.put(EsExecutors.PROCESSORS, 1 + EsExecutors.boundedNumberOfProcessors(Settings.EMPTY));
+ }
+
+ if (random.nextBoolean()) {
+ if (random.nextBoolean()) {
+ builder.put("indices.fielddata.cache.size", 1 + random.nextInt(1000), ByteSizeUnit.MB);
+ }
+ }
+
+ // randomize netty settings
+ if (random.nextBoolean()) {
+ builder.put(NettyTransport.WORKER_COUNT, random.nextInt(3) + 1);
+ builder.put(NettyTransport.CONNECTIONS_PER_NODE_RECOVERY, random.nextInt(2) + 1);
+ builder.put(NettyTransport.CONNECTIONS_PER_NODE_BULK, random.nextInt(3) + 1);
+ builder.put(NettyTransport.CONNECTIONS_PER_NODE_REG, random.nextInt(6) + 1);
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS));
+ }
+
+ if (random.nextInt(10) == 0) {
+ builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop");
+ builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop");
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(IndexModule.QUERY_CACHE_TYPE, random.nextBoolean() ? IndexModule.INDEX_QUERY_CACHE : IndexModule.NONE_QUERY_CACHE);
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(IndexModule.QUERY_CACHE_EVERYTHING, random.nextBoolean());
+ }
+
+ if (random.nextBoolean()) {
+ if (random.nextInt(10) == 0) { // do something crazy slow here
+ builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
+ } else {
+ builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
+ }
+ }
+ if (random.nextBoolean()) {
+ builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values()));
+ }
+
+ if (random.nextBoolean()) {
+ if (random.nextInt(10) == 0) { // do something crazy slow here
+ builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
+ } else {
+ builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
+ }
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms");
+ }
+
+ if (random.nextBoolean()) {
+ builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, RandomInts.randomIntBetween(random, -100, 2000));
+ }
+ if (random.nextBoolean()) {
+ builder.put(ScriptService.SCRIPT_CACHE_EXPIRE_SETTING, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 750, 10000000)));
+ }
+
+        // always default delayed allocation to 0 to make sure tests are not delayed
+ builder.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, 0);
+
+ return builder.build();
+ }
+
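+    /**
+     * Builds a unique cluster name from the given prefix, the child JVM id and the cluster seed,
+     * plus a per-call hash so that multiple maven tasks on one host stay distinct. An illustrative
+     * result: {@code prefix-CHILD_VM=[0]-CLUSTER_SEED=[1234]-HASH=[1A2B3C4D5E6F7890]}.
+     */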
+ public static String clusterName(String prefix, long clusterSeed) {
+ StringBuilder builder = new StringBuilder(prefix);
+ final int childVM = RandomizedTest.systemPropertyAsInt(SysGlobals.CHILDVM_SYSPROP_JVM_ID, 0);
+ builder.append("-CHILD_VM=[").append(childVM).append(']');
+ builder.append("-CLUSTER_SEED=[").append(clusterSeed).append(']');
+        // if multiple maven tasks run on a single host we better have an identifier that doesn't rely on input params
+ builder.append("-HASH=[").append(SeedUtils.formatSeed(System.nanoTime())).append(']');
+ return builder.toString();
+ }
+
+ private void ensureOpen() {
+ if (!open.get()) {
+ throw new RuntimeException("Cluster is already closed");
+ }
+ }
+
+ private synchronized NodeAndClient getOrBuildRandomNode() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient;
+ }
+ NodeAndClient buildNode = buildNode();
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode;
+ }
+
+ private synchronized NodeAndClient getRandomNodeAndClient() {
+ return getRandomNodeAndClient(nc -> true);
+ }
+
+
+ private synchronized NodeAndClient getRandomNodeAndClient(Predicate<NodeAndClient> predicate) {
+ ensureOpen();
+ Collection<NodeAndClient> values = nodes.values().stream().filter(predicate).collect(Collectors.toCollection(ArrayList::new));
+ if (!values.isEmpty()) {
+ int whichOne = random.nextInt(values.size());
+ for (NodeAndClient nodeAndClient : values) {
+ if (whichOne-- == 0) {
+ return nodeAndClient;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Ensures that at least <code>n</code> data nodes are present in the cluster.
+     * If more nodes than <code>n</code> are present, this method will not
+     * stop any of the running nodes.
+ */
+ public void ensureAtLeastNumDataNodes(int n) {
+ final List<Async<String>> asyncs = new ArrayList<>();
+ synchronized (this) {
+ int size = numDataNodes();
+ for (int i = size; i < n; i++) {
+ logger.info("increasing cluster size from {} to {}", size, n);
+ asyncs.add(startNodeAsync());
+ }
+ }
+ try {
+ for (Async<String> async : asyncs) {
+ async.get();
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchException("failed to start nodes", e);
+ }
+ if (!asyncs.isEmpty()) {
+ synchronized (this) {
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodes.size())).get());
+ }
+ }
+ }
+
+ /**
+     * Ensures that at most <code>n</code> data nodes are up and running.
+     * If fewer nodes than <code>n</code> are running, this method
+     * will not start any additional nodes.
+ */
+ public synchronized void ensureAtMostNumDataNodes(int n) throws IOException {
+ int size = numDataNodes();
+ if (size <= n) {
+ return;
+ }
+ // prevent killing the master if possible and client nodes
+ final Stream<NodeAndClient> collection =
+ n == 0 ? nodes.values().stream() : nodes.values().stream().filter(new DataNodePredicate().and(new MasterNodePredicate(getMasterName()).negate()));
+ final Iterator<NodeAndClient> values = collection.iterator();
+
+ logger.info("changing cluster size from {} to {}, {} data nodes", size(), n + numSharedClientNodes, n);
+ Set<NodeAndClient> nodesToRemove = new HashSet<>();
+ int numNodesAndClients = 0;
+ while (values.hasNext() && numNodesAndClients++ < size-n) {
+ NodeAndClient next = values.next();
+ nodesToRemove.add(next);
+ removeDisruptionSchemeFromNode(next);
+ next.close();
+ }
+ for (NodeAndClient toRemove : nodesToRemove) {
+ nodes.remove(toRemove.name);
+ }
+ if (!nodesToRemove.isEmpty() && size() > 0) {
+ assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodes.size())).get());
+ }
+ }
+
+ private NodeAndClient buildNode(Settings settings, Version version) {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), settings, version);
+ }
+
+ private NodeAndClient buildNode() {
+ int ord = nextNodeId.getAndIncrement();
+ return buildNode(ord, random.nextLong(), null, Version.CURRENT);
+ }
+
+ private NodeAndClient buildNode(int nodeId, long seed, Settings settings, Version version) {
+ assert Thread.holdsLock(this);
+ ensureOpen();
+ settings = getSettings(nodeId, seed, settings);
+ Collection<Class<? extends Plugin>> plugins = getPlugins(seed);
+ String name = buildNodeName(nodeId);
+ assert !nodes.containsKey(name);
+ Settings finalSettings = settingsBuilder()
+ .put("path.home", baseDir) // allow overriding path.home
+ .put(settings)
+ .put("name", name)
+ .put(DiscoveryService.SETTING_DISCOVERY_SEED, seed)
+ .build();
+ MockNode node = new MockNode(finalSettings, version, plugins);
+ return new NodeAndClient(name, node);
+ }
+
+ private String buildNodeName(int id) {
+ return nodePrefix + id;
+ }
+
+ /**
+ * Returns the common node name prefix for this test cluster.
+ */
+ public String nodePrefix() {
+ return nodePrefix;
+ }
+
+ @Override
+ public synchronized Client client() {
+ ensureOpen();
+ /* Randomly return a client to one of the nodes in the cluster */
+ return getOrBuildRandomNode().client(random);
+ }
+
+ /**
+ * Returns a node client to a data node in the cluster.
+     * Note: use this with care; tests should not rely on a certain node's client.
+ */
+ public synchronized Client dataNodeClient() {
+ ensureOpen();
+ /* Randomly return a client to one of the nodes in the cluster */
+ return getRandomNodeAndClient(new DataNodePredicate()).client(random);
+ }
+
+ /**
+ * Returns a node client to the current master node.
+     * Note: use this with care; tests should not rely on a certain node's client.
+ */
+ public synchronized Client masterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client master is requested
+ }
+ Assert.fail("No master client found");
+ return null; // can't happen
+ }
+
+ /**
+     * Returns a node client to a random node, but not the master. This method will fail if no non-master client is available.
+ */
+ public synchronized Client nonMasterClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()).negate());
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient(); // ensure node client non-master is requested
+ }
+ Assert.fail("No non-master client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a client to a node started with "node.client: true"
+ */
+ public synchronized Client clientNodeClient() {
+ ensureOpen();
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(new ClientNodePredicate());
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ int nodeId = nextNodeId.getAndIncrement();
+ Settings settings = getSettings(nodeId, random.nextLong(), Settings.EMPTY);
+ startNodeClient(settings);
+ return getRandomNodeAndClient(new ClientNodePredicate()).client(random);
+ }
+
+ public synchronized Client startNodeClient(Settings settings) {
+ ensureOpen(); // currently unused
+ Builder builder = settingsBuilder().put(settings).put("node.client", true);
+ if (size() == 0) {
+ // if we are the first node - don't wait for a state
+ builder.put("discovery.initial_state_timeout", 0);
+ }
+ String name = startNode(builder);
+ return nodes.get(name).nodeClient();
+ }
+
+ /**
+ * Returns a transport client
+ */
+ public synchronized Client transportClient() {
+ ensureOpen();
+ // randomly return a transport client going to one of the nodes in the cluster
+ return getOrBuildRandomNode().transportClient();
+ }
+
+ /**
+ * Returns a node client to a given node.
+ */
+ public synchronized Client client(String nodeName) {
+ ensureOpen();
+ NodeAndClient nodeAndClient = nodes.get(nodeName);
+ if (nodeAndClient != null) {
+ return nodeAndClient.client(random);
+ }
+ Assert.fail("No node found with name: [" + nodeName + "]");
+ return null; // can't happen
+ }
+
+
+ /**
+ * Returns a "smart" node client to a random node in the cluster
+ */
+ public synchronized Client smartClient() {
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient();
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.nodeClient();
+ }
+ Assert.fail("No smart client found");
+ return null; // can't happen
+ }
+
+ /**
+ * Returns a random node that applies to the given predicate.
+ * The predicate can filter nodes based on the nodes settings.
+ * If all nodes are filtered out this method will return <code>null</code>
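+     * Illustrative use (data nodes only): {@code cluster.client(s -> s.getAsBoolean("node.data", true))}.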
+ */
+ public synchronized Client client(final Predicate<Settings> filterPredicate) {
+ ensureOpen();
+ final NodeAndClient randomNodeAndClient = getRandomNodeAndClient(nodeAndClient -> filterPredicate.test(nodeAndClient.node.settings()));
+ if (randomNodeAndClient != null) {
+ return randomNodeAndClient.client(random);
+ }
+ return null;
+ }
+
+ @Override
+ public void close() {
+ if (this.open.compareAndSet(true, false)) {
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.testClusterClosed();
+ activeDisruptionScheme = null;
+ }
+ IOUtils.closeWhileHandlingException(nodes.values());
+ nodes.clear();
+ executor.shutdownNow();
+ }
+ }
+
+ public String getNodeMode() {
+ return nodeMode;
+ }
+
+ private final class NodeAndClient implements Closeable {
+ private MockNode node;
+ private Client nodeClient;
+ private Client transportClient;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+ private final String name;
+
+ NodeAndClient(String name, MockNode node) {
+ this.node = node;
+ this.name = name;
+ }
+
+ Node node() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return node;
+ }
+
+ Client client(Random random) {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ double nextDouble = random.nextDouble();
+ if (nextDouble < transportClientRatio) {
+ if (logger.isTraceEnabled()) {
+ logger.trace("Using transport client for node [{}] sniff: [{}]", node.settings().get("name"), false);
+ }
+ return getOrBuildTransportClient();
+ } else {
+ return getOrBuildNodeClient();
+ }
+ }
+
+ Client nodeClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return getOrBuildNodeClient();
+ }
+
+ Client transportClient() {
+ if (closed.get()) {
+ throw new RuntimeException("already closed");
+ }
+ return getOrBuildTransportClient();
+ }
+
+ private Client getOrBuildNodeClient() {
+ if (nodeClient != null) {
+ return nodeClient;
+ }
+ return nodeClient = node.client();
+ }
+
+ private Client getOrBuildTransportClient() {
+ if (transportClient != null) {
+ return transportClient;
+ }
+            /* no sniff client for now - doesn't work with all tests since it might throw NoNodeAvailableException if nodes are shut down.
+ * we first need support of transportClientRatio as annotations or so
+ */
+ return transportClient = new TransportClientFactory(false, nodeConfigurationSource.transportClientSettings(), baseDir, nodeMode, nodeConfigurationSource.transportClientPlugins()).client(node, clusterName);
+ }
+
+ void resetClient() throws IOException {
+ if (closed.get() == false) {
+ Releasables.close(nodeClient, transportClient);
+ nodeClient = null;
+ transportClient = null;
+ }
+ }
+
+ void closeNode() {
+ registerDataPath();
+ node.close();
+ }
+
+ void restart(RestartCallback callback) throws Exception {
+ assert callback != null;
+ resetClient();
+ if (!node.isClosed()) {
+ closeNode();
+ }
+ Settings newSettings = callback.onNodeStopped(name);
+ if (newSettings == null) {
+ newSettings = Settings.EMPTY;
+ }
+ if (callback.clearData(name)) {
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
+ if (nodeEnv.hasNodeFile()) {
+ IOUtils.rm(nodeEnv.nodeDataPaths());
+ }
+ }
+            final long newIdSeed = node.settings().getAsLong(DiscoveryService.SETTING_DISCOVERY_SEED, 0L) + 1; // use a new seed to make sure we get a new node id
+ Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(DiscoveryService.SETTING_DISCOVERY_SEED, newIdSeed).build();
+ Collection<Class<? extends Plugin>> plugins = node.getPlugins();
+ Version version = node.getVersion();
+ node = new MockNode(finalSettings, version, plugins);
+ node.start();
+ }
+
+ void registerDataPath() {
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, node);
+ if (nodeEnv.hasNodeFile()) {
+ dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths()));
+ }
+ }
+
+
+ @Override
+ public void close() throws IOException {
+ resetClient();
+ closed.set(true);
+ closeNode();
+ }
+ }
+
+ public static final String TRANSPORT_CLIENT_PREFIX = "transport_client_";
+
+ static class TransportClientFactory {
+ private final boolean sniff;
+ private final Settings settings;
+ private final Path baseDir;
+ private final String nodeMode;
+ private final Collection<Class<? extends Plugin>> plugins;
+
+ TransportClientFactory(boolean sniff, Settings settings, Path baseDir, String nodeMode, Collection<Class<? extends Plugin>> plugins) {
+ this.sniff = sniff;
+ this.settings = settings != null ? settings : Settings.EMPTY;
+ this.baseDir = baseDir;
+ this.nodeMode = nodeMode;
+ this.plugins = plugins;
+ }
+
+ public Client client(Node node, String clusterName) {
+ TransportAddress addr = node.injector().getInstance(TransportService.class).boundAddress().publishAddress();
+ Settings nodeSettings = node.settings();
+ Builder builder = settingsBuilder()
+ .put("client.transport.nodes_sampler_interval", "1s")
+ .put("path.home", baseDir)
+ .put("name", TRANSPORT_CLIENT_PREFIX + node.settings().get("name"))
+ .put(ClusterName.SETTING, clusterName).put("client.transport.sniff", sniff)
+ .put("node.mode", nodeSettings.get("node.mode", nodeMode))
+ .put("node.local", nodeSettings.get("node.local", ""))
+ .put("logger.prefix", nodeSettings.get("logger.prefix", ""))
+ .put("logger.level", nodeSettings.get("logger.level", "INFO"))
+ .put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
+ .put(settings);
+
+ TransportClient.Builder clientBuilder = TransportClient.builder().settings(builder.build());
+ for (Class<? extends Plugin> plugin : plugins) {
+ clientBuilder.addPlugin(plugin);
+ }
+ TransportClient client = clientBuilder.build();
+ client.addTransportAddress(addr);
+ return client;
+ }
+ }
+
+ @Override
+ public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException {
+ super.beforeTest(random, transportClientRatio);
+ reset(true);
+ }
+
+ private synchronized void reset(boolean wipeData) throws IOException {
+ // clear all rules for mock transport services
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ TransportService transportService = nodeAndClient.node.injector().getInstance(TransportService.class);
+ if (transportService instanceof MockTransportService) {
+ final MockTransportService mockTransportService = (MockTransportService) transportService;
+ mockTransportService.clearAllRules();
+ mockTransportService.clearTracers();
+ }
+ }
+ randomlyResetClients();
+ if (wipeData) {
+ wipeDataDirectories();
+ }
+ if (nextNodeId.get() == sharedNodesSeeds.length && nodes.size() == sharedNodesSeeds.length) {
+ logger.debug("Cluster hasn't changed - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ return;
+ }
+ logger.debug("Cluster is NOT consistent - restarting shared nodes - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+
+
+ Set<NodeAndClient> sharedNodes = new HashSet<>();
+ assert sharedNodesSeeds.length == numSharedDataNodes + numSharedClientNodes;
+ boolean changed = false;
+ for (int i = 0; i < numSharedDataNodes; i++) {
+ String buildNodeName = buildNodeName(i);
+ NodeAndClient nodeAndClient = nodes.get(buildNodeName);
+ if (nodeAndClient == null) {
+ changed = true;
+ nodeAndClient = buildNode(i, sharedNodesSeeds[i], null, Version.CURRENT);
+ nodeAndClient.node.start();
+ logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
+ }
+ sharedNodes.add(nodeAndClient);
+ }
+ for (int i = numSharedDataNodes; i < numSharedDataNodes + numSharedClientNodes; i++) {
+ String buildNodeName = buildNodeName(i);
+ NodeAndClient nodeAndClient = nodes.get(buildNodeName);
+ if (nodeAndClient == null) {
+ changed = true;
+ Builder clientSettingsBuilder = Settings.builder().put("node.client", true);
+ nodeAndClient = buildNode(i, sharedNodesSeeds[i], clientSettingsBuilder.build(), Version.CURRENT);
+ nodeAndClient.node.start();
+ logger.info("Start Shared Node [{}] not shared", nodeAndClient.name);
+ }
+ sharedNodes.add(nodeAndClient);
+ }
+ if (!changed && sharedNodes.size() == nodes.size()) {
+ logger.debug("Cluster is consistent - moving out - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ return; // we are consistent - return
+ }
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ nodes.remove(nodeAndClient.name);
+ }
+
+ // trash the remaining nodes
+ final Collection<NodeAndClient> toShutDown = nodes.values();
+ for (NodeAndClient nodeAndClient : toShutDown) {
+ logger.debug("Close Node [{}] not shared", nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ nodes.clear();
+ for (NodeAndClient nodeAndClient : sharedNodes) {
+ publishNode(nodeAndClient);
+ }
+ nextNodeId.set(sharedNodesSeeds.length);
+ assert size() == sharedNodesSeeds.length;
+ if (size() > 0) {
+ client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(sharedNodesSeeds.length)).get();
+ }
+ logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", nodes.keySet(), nextNodeId.get(), sharedNodesSeeds.length);
+ }
+
+ @Override
+ public synchronized void afterTest() throws IOException {
+ wipeDataDirectories();
+ randomlyResetClients(); /* reset all clients - each test gets its own client based on the Random instance created above. */
+ }
+
+ @Override
+ public void beforeIndexDeletion() {
+ // Check that the operations counter on index shard has reached 0.
+ // The assumption here is that after a test there are no ongoing write operations.
+        // Tests that have ongoing write operations after the test (for example because ttl is used
+        // and not all docs have been purged after the test) and inherit from
+        // ESIntegTestCase must override beforeIndexDeletion() to avoid failures.
+ assertShardIndexCounter();
+ //check that shards that have same sync id also contain same number of documents
+ assertSameSyncIdSameDocs();
+
+ }
+
+ private void assertSameSyncIdSameDocs() {
+ Map<String, Long> docsOnShards = new HashMap<>();
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+ for (IndexService indexService : indexServices) {
+ for (IndexShard indexShard : indexService) {
+ CommitStats commitStats = indexShard.commitStats();
+ if (commitStats != null) { // null if the engine is closed or if the shard is recovering
+ String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
+ if (syncId != null) {
+ long liveDocsOnShard = commitStats.getNumDocs();
+ if (docsOnShards.get(syncId) != null) {
+ assertThat("sync id is equal but number of docs does not match on node " + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got " + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard));
+ } else {
+ docsOnShards.put(syncId, liveDocsOnShard);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ private void assertShardIndexCounter() {
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
+ for (IndexService indexService : indexServices) {
+ for (IndexShard indexShard : indexService) {
+ assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getOperationsCount(), equalTo(0));
+ }
+ }
+ }
+ }
+
+ private void randomlyResetClients() throws IOException {
+ // only reset the clients on nightly tests, it causes heavy load...
+ if (RandomizedTest.isNightly() && rarely(random)) {
+ final Collection<NodeAndClient> nodesAndClients = nodes.values();
+ for (NodeAndClient nodeAndClient : nodesAndClients) {
+ nodeAndClient.resetClient();
+ }
+ }
+ }
+
+ private void wipeDataDirectories() {
+ if (!dataDirToClean.isEmpty()) {
+ try {
+ for (Path path : dataDirToClean) {
+ try {
+ FileSystemUtils.deleteSubDirectories(path);
+ logger.info("Successfully wiped data directory for node location: {}", path);
+ } catch (IOException e) {
+ logger.info("Failed to wipe data directory for node location: {}", path);
+ }
+ }
+ } finally {
+ dataDirToClean.clear();
+ }
+ }
+ }
+
+ /**
+ * Returns a reference to a random node's {@link ClusterService}
+ */
+ public ClusterService clusterService() {
+ return clusterService(null);
+ }
+
+ /**
+ * Returns a reference to a node's {@link ClusterService}. If the given node is null, a random node will be selected.
+ */
+ public synchronized ClusterService clusterService(@Nullable String node) {
+ return getInstance(ClusterService.class, node);
+ }
+
+ /**
+     * Returns an Iterable over all instances of the given class &lt;T&gt; across all nodes in the cluster.
+ */
+ public synchronized <T> Iterable<T> getInstances(Class<T> clazz) {
+ List<T> instances = new ArrayList<>(nodes.size());
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ instances.add(getInstanceFromNode(clazz, nodeAndClient.node));
+ }
+ return instances;
+ }
+
+ /**
+     * Returns an Iterable over all instances of the given class &lt;T&gt; across all data nodes in the cluster.
+ */
+ public synchronized <T> Iterable<T> getDataNodeInstances(Class<T> clazz) {
+ return getInstances(clazz, new DataNodePredicate());
+ }
+
+ private synchronized <T> Iterable<T> getInstances(Class<T> clazz, Predicate<NodeAndClient> predicate) {
+ Iterable<NodeAndClient> filteredNodes = nodes.values().stream().filter(predicate)::iterator;
+ List<T> instances = new ArrayList<>();
+ for (NodeAndClient nodeAndClient : filteredNodes) {
+ instances.add(getInstanceFromNode(clazz, nodeAndClient.node));
+ }
+ return instances;
+ }
+
+ /**
+     * Returns a reference to the given node's instance of the given class &lt;T&gt;.
+ */
+ public synchronized <T> T getInstance(Class<T> clazz, final String node) {
+ return getInstance(clazz, nc -> node == null || node.equals(nc.name));
+ }
+
+ public synchronized <T> T getDataNodeInstance(Class<T> clazz) {
+ return getInstance(clazz, new DataNodePredicate());
+ }
+
+ private synchronized <T> T getInstance(Class<T> clazz, Predicate<NodeAndClient> predicate) {
+ NodeAndClient randomNodeAndClient = getRandomNodeAndClient(predicate);
+ assert randomNodeAndClient != null;
+ return getInstanceFromNode(clazz, randomNodeAndClient.node);
+ }
+
+ /**
+     * Returns a reference to a random node's instance of the given class &lt;T&gt;.
+ */
+ public synchronized <T> T getInstance(Class<T> clazz) {
+ return getInstance(clazz, nc -> true);
+ }
+
+ private synchronized <T> T getInstanceFromNode(Class<T> clazz, Node node) {
+ return node.injector().getInstance(clazz);
+ }
+
+ @Override
+ public synchronized int size() {
+ return this.nodes.size();
+ }
+
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ List<InetSocketAddress> addresses = new ArrayList<>();
+ for (HttpServerTransport httpServerTransport : getInstances(HttpServerTransport.class)) {
+ addresses.add(((InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress()).address());
+ }
+ return addresses.toArray(new InetSocketAddress[addresses.size()]);
+ }
+
+ /**
+ * Stops a random data node in the cluster. Returns true if a node was found to stop, false otherwise.
+ */
+ public synchronized boolean stopRandomDataNode() throws IOException {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(new DataNodePredicate());
+ if (nodeAndClient != null) {
+ logger.info("Closing random node [{}] ", nodeAndClient.name);
+ removeDisruptionSchemeFromNode(nodeAndClient);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ return true;
+ }
+ return false;
+ }
+
+ /**
+     * Stops a random node in the cluster that matches the given filter, or does nothing if
+     * none of the nodes matches the filter.
+ */
+ public synchronized void stopRandomNode(final Predicate<Settings> filter) throws IOException {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(nc -> filter.test(nc.node.settings()));
+ if (nodeAndClient != null) {
+ logger.info("Closing filtered random node [{}] ", nodeAndClient.name);
+ removeDisruptionSchemeFromNode(nodeAndClient);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Stops the current master node forcefully
+ */
+ public synchronized void stopCurrentMasterNode() throws IOException {
+ ensureOpen();
+ assert size() > 0;
+ String masterNodeName = getMasterName();
+ assert nodes.containsKey(masterNodeName);
+ logger.info("Closing master node [{}] ", masterNodeName);
+ removeDisruptionSchemeFromNode(nodes.get(masterNodeName));
+ NodeAndClient remove = nodes.remove(masterNodeName);
+ remove.close();
+ }
+
+ /**
+     * Stops any of the current nodes, but not the master node.
+ */
+ public void stopRandomNonMasterNode() throws IOException {
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(new MasterNodePredicate(getMasterName()).negate());
+ if (nodeAndClient != null) {
+ logger.info("Closing random non master node [{}] current master [{}] ", nodeAndClient.name, getMasterName());
+ removeDisruptionSchemeFromNode(nodeAndClient);
+ nodes.remove(nodeAndClient.name);
+ nodeAndClient.close();
+ }
+ }
+
+ /**
+ * Restarts a random node in the cluster
+ */
+ public void restartRandomNode() throws Exception {
+ restartRandomNode(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts a random node in the cluster and calls the callback during restart.
+ */
+ public void restartRandomNode(RestartCallback callback) throws Exception {
+ restartRandomNode(nc -> true, callback);
+ }
+
+ /**
+ * Restarts a random data node in the cluster
+ */
+ public void restartRandomDataNode() throws Exception {
+ restartRandomDataNode(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts a random data node in the cluster and calls the callback during restart.
+ */
+ public void restartRandomDataNode(RestartCallback callback) throws Exception {
+ restartRandomNode(new DataNodePredicate(), callback);
+ }
+
+ /**
+ * Restarts a random node in the cluster and calls the callback during restart.
+ */
+ private void restartRandomNode(Predicate<NodeAndClient> predicate, RestartCallback callback) throws Exception {
+ ensureOpen();
+ NodeAndClient nodeAndClient = getRandomNodeAndClient(predicate);
+ if (nodeAndClient != null) {
+ logger.info("Restarting random node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+
+ /**
+ * Restarts a node and calls the callback during restart.
+ */
+ public void restartNode(String nodeName, RestartCallback callback) throws Exception {
+ ensureOpen();
+ NodeAndClient nodeAndClient = nodes.get(nodeName);
+ if (nodeAndClient != null) {
+ logger.info("Restarting node [{}] ", nodeAndClient.name);
+ nodeAndClient.restart(callback);
+ }
+ }
+
+ private void restartAllNodes(boolean rollingRestart, RestartCallback callback) throws Exception {
+ ensureOpen();
+ List<NodeAndClient> toRemove = new ArrayList<>();
+ try {
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ if (!callback.doRestart(nodeAndClient.name)) {
+ logger.info("Closing node [{}] during restart", nodeAndClient.name);
+ toRemove.add(nodeAndClient);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.close();
+ }
+ }
+ } finally {
+ for (NodeAndClient nodeAndClient : toRemove) {
+ nodes.remove(nodeAndClient.name);
+ }
+ }
+ logger.info("Restarting remaining nodes rollingRestart [{}]", rollingRestart);
+ if (rollingRestart) {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Restarting node [{}] ", nodeAndClient.name);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.restart(callback);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+ }
+ }
+ } else {
+ int numNodesRestarted = 0;
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ callback.doAfterNodes(numNodesRestarted++, nodeAndClient.nodeClient());
+ logger.info("Stopping node [{}] ", nodeAndClient.name);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.closeNode();
+ }
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ logger.info("Starting node [{}] ", nodeAndClient.name);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ nodeAndClient.restart(callback);
+ if (activeDisruptionScheme != null) {
+ activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+ }
+ }
+ }
+ }
+
+
+ public static final RestartCallback EMPTY_CALLBACK = new RestartCallback() {
+ @Override
+ public Settings onNodeStopped(String node) {
+ return null;
+ }
+ };
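+
+    // Illustrative: a custom callback can swap settings while a node is down, e.g.
+    //   cluster.fullRestart(new RestartCallback() {
+    //       @Override
+    //       public Settings onNodeStopped(String node) {
+    //           return Settings.builder().put("some.setting", "new-value").build(); // hypothetical setting key
+    //       }
+    //   });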
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then restarts all the nodes again.
+ */
+ public void fullRestart() throws Exception {
+ fullRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling fashion, i.e. only one node is restarted at a time.
+ */
+ public void rollingRestart() throws Exception {
+ rollingRestart(EMPTY_CALLBACK);
+ }
+
+ /**
+ * Restarts all nodes in a rolling fashion, i.e. only one node is restarted at a time.
+ */
+ public void rollingRestart(RestartCallback function) throws Exception {
+ restartAllNodes(true, function);
+ }
+
+ /**
+ * Restarts all nodes in the cluster. It first stops all nodes and then starts them again.
+ */
+ public void fullRestart(RestartCallback function) throws Exception {
+ restartAllNodes(false, function);
+ }
+
+
+ /**
+ * Returns the name of the current master node in the cluster.
+ */
+ public String getMasterName() {
+ return getMasterName(null);
+ }
+
+ /**
+ * Returns the name of the current master node in the cluster and executes the request via the node specified
+ * in the viaNode parameter. If viaNode isn't specified a random node will be picked to send the request to.
+ */
+ public String getMasterName(@Nullable String viaNode) {
+ try {
+ Client client = viaNode != null ? client(viaNode) : client();
+ ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState();
+ return state.nodes().masterNode().name();
+ } catch (Throwable e) {
+ logger.warn("Can't fetch cluster state", e);
+ throw new RuntimeException("Can't get master node " + e.getMessage(), e);
+ }
+ }
+
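+ /**
+ * Returns the names of all data nodes in the cluster except <code>numNodes</code> of them.
+ */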
+ synchronized Set<String> allDataNodesButN(int numNodes) {
+ return nRandomDataNodes(numDataNodes() - numNodes);
+ }
+
+ private synchronized Set<String> nRandomDataNodes(int numNodes) {
+ assert size() >= numNodes;
+ Map<String, NodeAndClient> dataNodes =
+ nodes
+ .entrySet()
+ .stream()
+ .filter(new EntryNodePredicate(new DataNodePredicate()))
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+ final HashSet<String> set = new HashSet<>();
+ final Iterator<String> iterator = dataNodes.keySet().iterator();
+ for (int i = 0; i < numNodes; i++) {
+ assert iterator.hasNext();
+ set.add(iterator.next());
+ }
+ return set;
+ }
+
+ /**
+ * Returns a set of nodes that have at least one shard of the given index.
+ */
+ public synchronized Set<String> nodesInclude(String index) {
+ if (clusterService().state().routingTable().hasIndex(index)) {
+ List<ShardRouting> allShards = clusterService().state().routingTable().allShards(index);
+ DiscoveryNodes discoveryNodes = clusterService().state().getNodes();
+ Set<String> nodes = new HashSet<>();
+ for (ShardRouting shardRouting : allShards) {
+ if (shardRouting.assignedToNode()) {
+ DiscoveryNode discoveryNode = discoveryNodes.get(shardRouting.currentNodeId());
+ nodes.add(discoveryNode.getName());
+ }
+ }
+ return nodes;
+ }
+ return Collections.emptySet();
+ }
+
+ /**
+ * Starts a node with default settings and returns its name.
+ */
+ public synchronized String startNode() {
+ return startNode(Settings.EMPTY, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node with default settings and the specified version and returns its name.
+ */
+ public synchronized String startNode(Version version) {
+ return startNode(Settings.EMPTY, version);
+ }
+
+ /**
+ * Starts a node with the given settings builder and returns its name.
+ */
+ public synchronized String startNode(Settings.Builder settings) {
+ return startNode(settings.build(), Version.CURRENT);
+ }
+
+ /**
+ * Starts a node with the given settings and returns its name.
+ */
+ public synchronized String startNode(Settings settings) {
+ return startNode(settings, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node with the given settings and version and returns its name.
+ */
+ public synchronized String startNode(Settings settings, Version version) {
+ NodeAndClient buildNode = buildNode(settings, version);
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode.name;
+ }
+
+ public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes) {
+ return startMasterOnlyNodesAsync(numNodes, Settings.EMPTY);
+ }
+
+ public synchronized Async<List<String>> startMasterOnlyNodesAsync(int numNodes, Settings settings) {
+ Settings masterNodeSettings = Settings.builder().put(settings).put("node.master", true).put("node.data", false).build();
+ return startNodesAsync(numNodes, masterNodeSettings, Version.CURRENT);
+ }
+
+ public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes) {
+ return startDataOnlyNodesAsync(numNodes, Settings.EMPTY);
+ }
+
+ public synchronized Async<List<String>> startDataOnlyNodesAsync(int numNodes, Settings settings) {
+ Settings dataNodeSettings = Settings.builder().put(settings).put("node.master", false).put("node.data", true).build();
+ return startNodesAsync(numNodes, dataNodeSettings, Version.CURRENT);
+ }
+
+ public synchronized Async<String> startMasterOnlyNodeAsync() {
+ return startMasterOnlyNodeAsync(Settings.EMPTY);
+ }
+
+ public synchronized Async<String> startMasterOnlyNodeAsync(Settings settings) {
+ Settings masterNodeSettings = Settings.builder().put(settings).put("node.master", true).put("node.data", false).build();
+ return startNodeAsync(masterNodeSettings, Version.CURRENT);
+ }
+
+ public synchronized String startMasterOnlyNode(Settings settings) {
+ Settings masterNodeSettings = Settings.builder().put(settings).put("node.master", true).put("node.data", false).build();
+ return startNode(masterNodeSettings, Version.CURRENT);
+ }
+
+ public synchronized Async<String> startDataOnlyNodeAsync() {
+ return startDataOnlyNodeAsync(Settings.EMPTY);
+ }
+
+ public synchronized Async<String> startDataOnlyNodeAsync(Settings settings) {
+ Settings dataNodeSettings = Settings.builder().put(settings).put("node.master", false).put("node.data", true).build();
+ return startNodeAsync(dataNodeSettings, Version.CURRENT);
+ }
+
+ public synchronized String startDataOnlyNode(Settings settings) {
+ Settings dataNodeSettings = Settings.builder().put(settings).put("node.master", false).put("node.data", true).build();
+ return startNode(dataNodeSettings, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node in an async manner with default settings and returns a future with its name.
+ */
+ public synchronized Async<String> startNodeAsync() {
+ return startNodeAsync(Settings.EMPTY, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node in an async manner with the given settings and returns a future with its name.
+ */
+ public synchronized Async<String> startNodeAsync(final Settings settings) {
+ return startNodeAsync(settings, Version.CURRENT);
+ }
+
+ /**
+ * Starts a node in an async manner with the given settings and version and returns a future with its name.
+ */
+ public synchronized Async<String> startNodeAsync(final Settings settings, final Version version) {
+ final NodeAndClient buildNode = buildNode(settings, version);
+ final Future<String> submit = executor.submit(() -> {
+ buildNode.node().start();
+ publishNode(buildNode);
+ return buildNode.name;
+ });
+ return () -> submit.get();
+ }
+
+ /**
+ * Starts multiple nodes in an async manner and returns a future with their names.
+ */
+ public synchronized Async<List<String>> startNodesAsync(final int numNodes) {
+ return startNodesAsync(numNodes, Settings.EMPTY, Version.CURRENT);
+ }
+
+ /**
+ * Starts multiple nodes in an async manner with the given settings and returns a future with their names.
+ */
+ public synchronized Async<List<String>> startNodesAsync(final int numNodes, final Settings settings) {
+ return startNodesAsync(numNodes, settings, Version.CURRENT);
+ }
+
+ /**
+ * Starts multiple nodes in an async manner with the given settings and version and returns a future with their names.
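+ * For example (a minimal sketch; the node count and settings are illustrative):
+ * <pre>
+ * Async&lt;List&lt;String&gt;&gt; startedNodes = startNodesAsync(3, settings, Version.CURRENT);
+ * List&lt;String&gt; names = startedNodes.get(); // blocks until every node has started
+ * </pre>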
+ */
+ public synchronized Async<List<String>> startNodesAsync(final int numNodes, final Settings settings, final Version version) {
+ final List<Async<String>> asyncs = new ArrayList<>();
+ for (int i = 0; i < numNodes; i++) {
+ asyncs.add(startNodeAsync(settings, version));
+ }
+
+ return () -> {
+ List<String> ids = new ArrayList<>();
+ for (Async<String> async : asyncs) {
+ ids.add(async.get());
+ }
+ return ids;
+ };
+ }
+
+ /**
+ * Starts multiple nodes (based on the number of settings provided) in an async manner, with explicit settings for each node.
+ * The order of the node names returned matches the order of the settings provided.
+ */
+ public synchronized Async<List<String>> startNodesAsync(final Settings... settings) {
+ List<Async<String>> asyncs = new ArrayList<>();
+ for (Settings setting : settings) {
+ asyncs.add(startNodeAsync(setting, Version.CURRENT));
+ }
+ return () -> {
+ List<String> ids = new ArrayList<>();
+ for (Async<String> async : asyncs) {
+ ids.add(async.get());
+ }
+ return ids;
+ };
+ }
+
+ private synchronized void publishNode(NodeAndClient nodeAndClient) {
+ assert !nodeAndClient.node().isClosed();
+ NodeEnvironment nodeEnv = getInstanceFromNode(NodeEnvironment.class, nodeAndClient.node);
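+ // remember the node's data paths so they can be cleaned up later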
+ if (nodeEnv.hasNodeFile()) {
+ dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths()));
+ }
+ nodes.put(nodeAndClient.name, nodeAndClient);
+ applyDisruptionSchemeToNode(nodeAndClient);
+ }
+
+ public void closeNonSharedNodes(boolean wipeData) throws IOException {
+ reset(wipeData);
+ }
+
+ @Override
+ public int numDataNodes() {
+ return dataNodeAndClients().size();
+ }
+
+ @Override
+ public int numDataAndMasterNodes() {
+ return dataAndMasterNodes().size();
+ }
+
+ public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
+ clearDisruptionScheme();
+ scheme.applyToCluster(this);
+ activeDisruptionScheme = scheme;
+ }
+
+ public void clearDisruptionScheme() {
+ if (activeDisruptionScheme != null) {
+ TimeValue expectedHealingTime = activeDisruptionScheme.expectedTimeToHeal();
+ logger.info("Clearing active scheme {}, expected healing time {}", activeDisruptionScheme, expectedHealingTime);
+ activeDisruptionScheme.removeAndEnsureHealthy(this);
+ }
+ activeDisruptionScheme = null;
+ }
+
+ private void applyDisruptionSchemeToNode(NodeAndClient nodeAndClient) {
+ if (activeDisruptionScheme != null) {
+ assert nodes.containsKey(nodeAndClient.name);
+ activeDisruptionScheme.applyToNode(nodeAndClient.name, this);
+ }
+ }
+
+ private void removeDisruptionSchemeFromNode(NodeAndClient nodeAndClient) {
+ if (activeDisruptionScheme != null) {
+ assert nodes.containsKey(nodeAndClient.name);
+ activeDisruptionScheme.removeFromNode(nodeAndClient.name, this);
+ }
+ }
+
+ private synchronized Collection<NodeAndClient> dataNodeAndClients() {
+ return filterNodes(nodes, new DataNodePredicate());
+ }
+
+ private synchronized Collection<NodeAndClient> dataAndMasterNodes() {
+ return filterNodes(nodes, new DataOrMasterNodePredicate());
+ }
+
+ private synchronized Collection<NodeAndClient> filterNodes(Map<String, InternalTestCluster.NodeAndClient> map, Predicate<NodeAndClient> predicate) {
+ return map
+ .values()
+ .stream()
+ .filter(predicate)
+ .collect(Collectors.toCollection(ArrayList::new));
+ }
+
+ private static final class DataNodePredicate implements Predicate<NodeAndClient> {
+ @Override
+ public boolean test(NodeAndClient nodeAndClient) {
+ return DiscoveryNode.dataNode(nodeAndClient.node.settings());
+ }
+ }
+
+ private static final class DataOrMasterNodePredicate implements Predicate<NodeAndClient> {
+ @Override
+ public boolean test(NodeAndClient nodeAndClient) {
+ return DiscoveryNode.dataNode(nodeAndClient.node.settings()) ||
+ DiscoveryNode.masterNode(nodeAndClient.node.settings());
+ }
+ }
+
+ private static final class MasterNodePredicate implements Predicate<NodeAndClient> {
+ private final String masterNodeName;
+
+ public MasterNodePredicate(String masterNodeName) {
+ this.masterNodeName = masterNodeName;
+ }
+
+ @Override
+ public boolean test(NodeAndClient nodeAndClient) {
+ return masterNodeName.equals(nodeAndClient.name);
+ }
+ }
+
+ private static final class ClientNodePredicate implements Predicate<NodeAndClient> {
+ @Override
+ public boolean test(NodeAndClient nodeAndClient) {
+ return DiscoveryNode.clientNode(nodeAndClient.node.settings());
+ }
+ }
+
+ private static final class EntryNodePredicate implements Predicate<Map.Entry<String, NodeAndClient>> {
+ private final Predicate<NodeAndClient> delegateNodePredicate;
+
+ EntryNodePredicate(Predicate<NodeAndClient> delegateNodePredicate) {
+ this.delegateNodePredicate = delegateNodePredicate;
+ }
+
+ @Override
+ public boolean test(Map.Entry<String, NodeAndClient> entry) {
+ return delegateNodePredicate.test(entry.getValue());
+ }
+ }
+
+ synchronized String routingKeyForShard(String index, String type, int shard, Random random) {
+ assertThat(shard, greaterThanOrEqualTo(0));
+ for (NodeAndClient n : nodes.values()) {
+ Node node = n.node;
+ IndicesService indicesService = getInstanceFromNode(IndicesService.class, node);
+ ClusterService clusterService = getInstanceFromNode(ClusterService.class, node);
+ IndexService indexService = indicesService.indexService(index);
+ if (indexService != null) {
+ assertThat(indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), greaterThan(shard));
+ OperationRouting operationRouting = getInstanceFromNode(OperationRouting.class, node);
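+ // brute force: draw random routing values until one maps to the requested shard id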
+ while (true) {
+ String routing = RandomStrings.randomAsciiOfLength(random, 10);
+ final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId();
+ if (shard == targetShard) {
+ return routing;
+ }
+ }
+ }
+ }
+ fail("Could not find a node that holds " + index);
+ return null;
+ }
+
+ @Override
+ public synchronized Iterator<Client> iterator() {
+ ensureOpen();
+ final Iterator<NodeAndClient> iterator = nodes.values().iterator();
+ return new Iterator<Client>() {
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Client next() {
+ return iterator.next().client(random);
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException("");
+ }
+
+ };
+ }
+
+ /**
+ * Returns a predicate that only accepts settings of nodes with one of the given names.
+ */
+ public static Predicate<Settings> nameFilter(String... nodeName) {
+ return new NodeNamePredicate(new HashSet<>(Arrays.asList(nodeName)));
+ }
+
+ private static final class NodeNamePredicate implements Predicate<Settings> {
+ private final HashSet<String> nodeNames;
+
+
+ public NodeNamePredicate(HashSet<String> nodeNames) {
+ this.nodeNames = nodeNames;
+ }
+
+ @Override
+ public boolean test(Settings settings) {
+ return nodeNames.contains(settings.get("name"));
+ }
+ }
+
+
+ /**
+ * A callback that is invoked during {@link #rollingRestart(InternalTestCluster.RestartCallback)}
+ * and/or {@link #fullRestart(InternalTestCluster.RestartCallback)} to execute actions at certain
+ * stages of the restart.
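+ * <p>
+ * For example, a minimal sketch that applies an illustrative setting once a node has stopped:
+ * <pre>
+ * rollingRestart(new RestartCallback() {
+ * public Settings onNodeStopped(String nodeName) {
+ * return Settings.builder().put("some.setting", true).build();
+ * }
+ * });
+ * </pre>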
+ */
+ public static class RestartCallback {
+
+ /**
+ * Executed once the node with the given name has been stopped.
+ */
+ public Settings onNodeStopped(String nodeName) throws Exception {
+ return Settings.EMPTY;
+ }
+
+ /**
+ * Executed for each node before the <tt>n+1</tt> node is restarted. The given client is
+ * an active client to the node that will be restarted next.
+ */
+ public void doAfterNodes(int n, Client client) throws Exception {
+ }
+
+ /**
+ * If this returns <code>true</code> all data for the node with the given node name will be cleared including
+ * gateways and all index data. Returns <code>false</code> by default.
+ */
+ public boolean clearData(String nodeName) {
+ return false;
+ }
+
+
+ /**
+ * If this returns <code>false</code> the node with the given node name will not be restarted. It will be
+ * closed and removed from the cluster. Returns <code>true</code> by default.
+ */
+ public boolean doRestart(String nodeName) {
+ return true;
+ }
+ }
+
+ public Settings getDefaultSettings() {
+ return defaultSettings;
+ }
+
+ @Override
+ public void ensureEstimatedStats() {
+ if (size() > 0) {
+ // Checks that the breakers have been reset without incurring a
+ // network request, because a network request can increment one
+ // of the breakers
+ for (NodeAndClient nodeAndClient : nodes.values()) {
+ final IndicesFieldDataCache fdCache = getInstanceFromNode(IndicesFieldDataCache.class, nodeAndClient.node);
+ // Clean up the cache, ensuring that entries' listeners have been called
+ fdCache.getCache().refresh();
+
+ final String name = nodeAndClient.name;
+ final CircuitBreakerService breakerService = getInstanceFromNode(CircuitBreakerService.class, nodeAndClient.node);
+ CircuitBreaker fdBreaker = breakerService.getBreaker(CircuitBreaker.FIELDDATA);
+ assertThat("Fielddata breaker not reset to 0 on node: " + name, fdBreaker.getUsed(), equalTo(0L));
+ // Anything that uses transport or HTTP can increase the
+ // request breaker (because they use bigarrays), because of
+ // that the breaker can sometimes be incremented from ping
+ // requests from other clusters because Jenkins is running
+ // multiple ES testing jobs in parallel on the same machine.
+ // To combat this we check whether the breaker has reached 0
+ // in an assertBusy loop, so it will try for 10 seconds and
+ // fail if it never reached 0
+ try {
+ assertBusy(() -> {
+ CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
+ assertThat("Request breaker not reset to 0 on node: " + name, reqBreaker.getUsed(), equalTo(0L));
+ });
+ } catch (Exception e) {
+ fail("Exception during check for request breaker reset to 0: " + e);
+ }
+
+ NodeService nodeService = getInstanceFromNode(NodeService.class, nodeAndClient.node);
+ NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false, false);
+ assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("Query cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
+ assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
+ }
+ }
+ }
+
+ @Override
+ public void assertAfterTest() throws IOException {
+ super.assertAfterTest();
+ for (NodeEnvironment env : this.getInstances(NodeEnvironment.class)) {
+ Set<ShardId> shardIds = env.lockedShards();
+ for (ShardId id : shardIds) {
+ try {
+ env.shardLock(id, TimeUnit.SECONDS.toMillis(5)).close();
+ } catch (IOException ex) {
+ fail("Shard " + id + " is still locked after 5 sec waiting");
+ }
+ }
+ }
+ }
+
+ /**
+ * Simple interface that allows waiting for an async operation to finish
+ * @param <T> the result of the async execution
+ */
+ public interface Async<T> {
+ T get() throws ExecutionException, InterruptedException;
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
new file mode 100644
index 0000000000..c02c1d8503
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.Collection;
+import java.util.Collections;
+
+/**
+ * This is a testing plugin that registers a generic {@link org.elasticsearch.test.MockIndexEventListener.TestEventListener} as a node-level service as well as a listener
+ * on every index. Tests can access it like this:
+ * <pre>
+ * TestEventListener listener = internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node1);
+ * listener.setNewDelegate(new IndexEventListener() {
+ * // do some stuff
+ * });
+ * </pre>
+ * This allows tests to use the listener without registering their own plugins.
+ */
+public final class MockIndexEventListener {
+
+ public static class TestPlugin extends Plugin {
+ private final TestEventListener listener = new TestEventListener();
+ @Override
+ public String name() {
+ return "mock-index-listener";
+ }
+ @Override
+ public String description() {
+ return "a mock index listener for testing only";
+ }
+
+ @Override
+ public void onIndexModule(IndexModule module) {
+ module.addIndexEventListener(listener);
+ }
+
+ @Override
+ public Collection<Module> nodeModules() {
+ return Collections.singleton(binder -> binder.bind(TestEventListener.class).toInstance(listener));
+ }
+ }
+
+ public static class TestEventListener implements IndexEventListener {
+ private volatile IndexEventListener delegate = new IndexEventListener() {};
+
+ public void setNewDelegate(IndexEventListener listener) {
+ delegate = listener == null ? new IndexEventListener() {} : listener;
+ }
+
+ @Override
+ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting oldRouting, ShardRouting newRouting) {
+ delegate.shardRoutingChanged(indexShard, oldRouting, newRouting);
+ }
+
+ @Override
+ public void afterIndexShardCreated(IndexShard indexShard) {
+ delegate.afterIndexShardCreated(indexShard);
+ }
+
+ @Override
+ public void afterIndexShardStarted(IndexShard indexShard) {
+ delegate.afterIndexShardStarted(indexShard);
+ }
+
+ @Override
+ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
+ delegate.beforeIndexShardClosed(shardId, indexShard, indexSettings);
+ }
+
+ @Override
+ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
+ delegate.afterIndexShardClosed(shardId, indexShard, indexSettings);
+ }
+
+ @Override
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
+ delegate.indexShardStateChanged(indexShard, previousState, currentState, reason);
+ }
+
+ @Override
+ public void onShardInactive(IndexShard indexShard) {
+ delegate.onShardInactive(indexShard);
+ }
+
+ @Override
+ public void beforeIndexCreated(Index index, Settings indexSettings) {
+ delegate.beforeIndexCreated(index, indexSettings);
+ }
+
+ @Override
+ public void afterIndexCreated(IndexService indexService) {
+ delegate.afterIndexCreated(indexService);
+ }
+
+ @Override
+ public void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) {
+ delegate.beforeIndexShardCreated(shardId, indexSettings);
+ }
+
+ @Override
+ public void beforeIndexClosed(IndexService indexService) {
+ delegate.beforeIndexClosed(indexService);
+ }
+
+ @Override
+ public void afterIndexClosed(Index index, Settings indexSettings) {
+ delegate.afterIndexClosed(index, indexSettings);
+ }
+
+ @Override
+ public void beforeIndexShardDeleted(ShardId shardId, Settings indexSettings) {
+ delegate.beforeIndexShardDeleted(shardId, indexSettings);
+ }
+
+ @Override
+ public void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) {
+ delegate.afterIndexShardDeleted(shardId, indexSettings);
+ }
+
+ @Override
+ public void afterIndexDeleted(Index index, Settings indexSettings) {
+ delegate.afterIndexDeleted(index, indexSettings);
+ }
+
+ @Override
+ public void beforeIndexDeleted(IndexService indexService) {
+ delegate.beforeIndexDeleted(indexService);
+ }
+
+ @Override
+ public void beforeIndexAddedToCluster(Index index, Settings indexSettings) {
+ delegate.beforeIndexAddedToCluster(index, indexSettings);
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
new file mode 100644
index 0000000000..5dfb845c19
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.MockEngineFactoryPlugin;
+import org.elasticsearch.node.NodeMocksPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.search.MockSearchService;
+import org.elasticsearch.test.store.MockFSIndexStore;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
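+ /**
+ * Provides per-node settings and plugins used to construct the nodes of a test cluster.
+ * A minimal sketch of a custom source (the setting name is illustrative):
+ * <pre>
+ * NodeConfigurationSource source = new NodeConfigurationSource() {
+ * public Settings nodeSettings(int nodeOrdinal) {
+ * return Settings.builder().put("some.setting", nodeOrdinal).build();
+ * }
+ * public Settings transportClientSettings() {
+ * return Settings.EMPTY;
+ * }
+ * };
+ * </pre>
+ */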
+public abstract class NodeConfigurationSource {
+
+ public static final NodeConfigurationSource EMPTY = new NodeConfigurationSource() {
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ return null;
+ }
+
+ @Override
+ public Settings transportClientSettings() {
+ return null;
+ }
+ };
+
+ /**
+ * @return the settings for the node represented by the given ordinal, or {@code null} if there are no settings defined
+ */
+ public abstract Settings nodeSettings(int nodeOrdinal);
+
+ /** Plugins that will be randomly added to the node */
+ public Collection<Class<? extends Plugin>> mockPlugins() {
+ List<Class<? extends Plugin>> plugins = new ArrayList<>();
+ plugins.add(MockTransportService.TestPlugin.class);
+ plugins.add(MockFSIndexStore.TestPlugin.class);
+ plugins.add(NodeMocksPlugin.class);
+ plugins.add(MockEngineFactoryPlugin.class);
+ plugins.add(MockSearchService.TestPlugin.class);
+ plugins.add(AssertingLocalTransport.TestPlugin.class);
+ return plugins;
+ }
+
+ /** Returns plugins that should be loaded on the node */
+ public Collection<Class<? extends Plugin>> nodePlugins() {
+ return Collections.emptyList();
+ }
+
+ public abstract Settings transportClientSettings();
+
+ /** Returns plugins that should be loaded in the transport client */
+ public Collection<Class<? extends Plugin>> transportClientPlugins() {
+ return Collections.emptyList();
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java b/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java
new file mode 100644
index 0000000000..3bace95c23
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import java.nio.charset.StandardCharsets;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+
+public class StreamsUtils {
+
+ public static String copyToStringFromClasspath(ClassLoader classLoader, String path) throws IOException {
+ InputStream is = classLoader.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath with class loader [" + classLoader + "]");
+ }
+ return Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8));
+ }
+
+ public static String copyToStringFromClasspath(String path) throws IOException {
+ InputStream is = Streams.class.getResourceAsStream(path);
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+ return Streams.copyToString(new InputStreamReader(is, StandardCharsets.UTF_8));
+ }
+
+ public static byte[] copyToBytesFromClasspath(String path) throws IOException {
+ try (InputStream is = Streams.class.getResourceAsStream(path)) {
+ if (is == null) {
+ throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
+ }
+ try (BytesStreamOutput out = new BytesStreamOutput()) {
+ Streams.copy(is, out);
+ return out.bytes().toBytes();
+ }
+ }
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
new file mode 100644
index 0000000000..858fbab9ab
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import com.carrotsearch.hppc.ObjectArrayList;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.indices.IndexTemplateMissingException;
+import org.elasticsearch.repositories.RepositoryMissingException;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Random;
+import java.util.Set;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+
+/**
+ * Base test cluster that provides the basis for running tests against any elasticsearch cluster whose layout
+ * (e.g. number of nodes) is predefined and cannot be changed during test execution
+ */
+public abstract class TestCluster implements Iterable<Client>, Closeable {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+ private final long seed;
+
+ protected Random random;
+
+ protected double transportClientRatio = 0.0;
+
+ public TestCluster(long seed) {
+ this.seed = seed;
+ }
+
+ public long seed() {
+ return seed;
+ }
+
+ /**
+ * This method should be executed before each test to reset the cluster to its initial state.
+ */
+ public void beforeTest(Random random, double transportClientRatio) throws IOException, InterruptedException {
+ assert transportClientRatio >= 0.0 && transportClientRatio <= 1.0;
+ logger.debug("Reset test cluster with transport client ratio: [{}]", transportClientRatio);
+ this.transportClientRatio = transportClientRatio;
+ this.random = new Random(random.nextLong());
+ }
+
+ /**
+ * Wipes any data that a test can leave behind: indices, templates (except excluded templates) and repositories
+ */
+ public void wipe(Set<String> excludeTemplates) {
+ wipeIndices("_all");
+ wipeAllTemplates(excludeTemplates);
+ wipeRepositories();
+ }
+
+ /**
+ * Assertions that should run before the cluster is wiped should be called in this method
+ */
+ public void beforeIndexDeletion() {
+ }
+
+ /**
+ * This method checks all the things that need to be checked after each test
+ */
+ public void assertAfterTest() throws IOException {
+ ensureEstimatedStats();
+ }
+
+ /**
+ * This method should be executed during tear down, after each test (but after assertAfterTest)
+ */
+ public abstract void afterTest() throws IOException;
+
+ /**
+ * Returns a client connected to any node in the cluster
+ */
+ public abstract Client client();
+
+ /**
+ * Returns the number of nodes in the cluster.
+ */
+ public abstract int size();
+
+ /**
+ * Returns the number of data nodes in the cluster.
+ */
+ public abstract int numDataNodes();
+
+ /**
+ * Returns the number of data and master eligible nodes in the cluster.
+ */
+ public abstract int numDataAndMasterNodes();
+
+ /**
+ * Returns the http addresses of the nodes within the cluster.
+ * Can be used to run REST tests against the test cluster.
+ */
+ public abstract InetSocketAddress[] httpAddresses();
+
+ /**
+ * Closes the current cluster
+ */
+ @Override
+ public abstract void close() throws IOException;
+
+ /**
+ * Deletes the given indices from the test cluster. If no index name is passed to this method
+ * all indices are removed.
+ */
+ public void wipeIndices(String... indices) {
+ assert indices != null && indices.length > 0;
+ if (size() > 0) {
+ try {
+ assertAcked(client().admin().indices().prepareDelete(indices));
+ } catch (IndexNotFoundException e) {
+ // ignore
+ } catch (IllegalArgumentException e) {
+ // Happens if `action.destructive_requires_name` is set to true
+ // which is the case in the CloseIndexDisableCloseAllTests
+ if ("_all".equals(indices[0])) {
+ ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
+ ObjectArrayList<String> concreteIndices = new ObjectArrayList<>();
+ for (IndexMetaData indexMetaData : clusterStateResponse.getState().metaData()) {
+ concreteIndices.add(indexMetaData.getIndex());
+ }
+ if (!concreteIndices.isEmpty()) {
+ assertAcked(client().admin().indices().prepareDelete(concreteIndices.toArray(String.class)));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Removes all templates, except those whose names are contained in the given exclude set
+ */
+ public void wipeAllTemplates(Set<String> exclude) {
+ if (size() > 0) {
+ GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get();
+ for (IndexTemplateMetaData indexTemplate : response.getIndexTemplates()) {
+ if (exclude.contains(indexTemplate.getName())) {
+ continue;
+ }
+ try {
+ client().admin().indices().prepareDeleteTemplate(indexTemplate.getName()).execute().actionGet();
+ } catch (IndexTemplateMissingException e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes index templates, supports wildcard notation.
+ * If no template name is passed to this method all templates are removed.
+ */
+ public void wipeTemplates(String... templates) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (templates.length == 0) {
+ templates = new String[]{"*"};
+ }
+ for (String template : templates) {
+ try {
+ client().admin().indices().prepareDeleteTemplate(template).execute().actionGet();
+ } catch (IndexTemplateMissingException e) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Deletes repositories, supports wildcard notation.
+ */
+ public void wipeRepositories(String... repositories) {
+ if (size() > 0) {
+ // if nothing is provided, delete all
+ if (repositories.length == 0) {
+ repositories = new String[]{"*"};
+ }
+ for (String repository : repositories) {
+ try {
+ client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
+ } catch (RepositoryMissingException ex) {
+ // ignore
+ }
+ }
+ }
+ }
+
+ /**
+ * Ensures that any breaker statistics are reset to 0.
+ *
+ * The implementation is specific to the test cluster, because the act of
+ * checking some breaker stats can increase them.
+ */
+ public abstract void ensureEstimatedStats();
+
+ /**
+ * Returns the cluster name
+ */
+ public abstract String getClusterName();
+
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
new file mode 100644
index 0000000000..51fb0c905f
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
@@ -0,0 +1,671 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
+
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.util.Counter;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.cache.recycler.PageCacheRecycler;
+import org.elasticsearch.common.HasContext;
+import org.elasticsearch.common.HasContextAndHeaders;
+import org.elasticsearch.common.HasHeaders;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.analysis.AnalysisService;
+import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.query.ParsedQuery;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.similarity.SimilarityService;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchShardTarget;
+import org.elasticsearch.search.aggregations.SearchContextAggregations;
+import org.elasticsearch.search.dfs.DfsSearchResult;
+import org.elasticsearch.search.fetch.FetchSearchResult;
+import org.elasticsearch.search.fetch.FetchSubPhase;
+import org.elasticsearch.search.fetch.FetchSubPhaseContext;
+import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
+import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
+import org.elasticsearch.search.fetch.source.FetchSourceContext;
+import org.elasticsearch.search.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.ScrollContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.search.rescore.RescoreSearchContext;
+import org.elasticsearch.search.suggest.SuggestionSearchContext;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class TestSearchContext extends SearchContext {
+
+ final PageCacheRecycler pageCacheRecycler;
+ final BigArrays bigArrays;
+ final IndexService indexService;
+ final IndexFieldDataService indexFieldDataService;
+ final BitsetFilterCache fixedBitSetFilterCache;
+ final ThreadPool threadPool;
+ final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
+ final IndexShard indexShard;
+ final Counter timeEstimateCounter = Counter.newCounter();
+ final QuerySearchResult queryResult = new QuerySearchResult();
+ ScriptService scriptService;
+ ParsedQuery originalQuery;
+ ParsedQuery postFilter;
+ Query query;
+ Float minScore;
+
+ ContextIndexSearcher searcher;
+ int size;
+ private int terminateAfter = DEFAULT_TERMINATE_AFTER;
+ private String[] types;
+ private SearchContextAggregations aggregations;
+
+ private final long originNanoTime = System.nanoTime();
+ private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
+
+ public TestSearchContext(ThreadPool threadPool, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService, IndexService indexService) {
+ super(ParseFieldMatcher.STRICT, null);
+ this.pageCacheRecycler = pageCacheRecycler;
+ this.bigArrays = bigArrays.withCircuitBreaking();
+ this.indexService = indexService;
+ this.indexFieldDataService = indexService.fieldData();
+ this.fixedBitSetFilterCache = indexService.cache().bitsetFilterCache();
+ this.threadPool = threadPool;
+ this.indexShard = indexService.getShardOrNull(0);
+ this.scriptService = scriptService;
+ }
+
+ public TestSearchContext() {
+ super(ParseFieldMatcher.STRICT, null);
+ this.pageCacheRecycler = null;
+ this.bigArrays = null;
+ this.indexService = null;
+ this.indexFieldDataService = null;
+ this.threadPool = null;
+ this.fixedBitSetFilterCache = null;
+ this.indexShard = null;
+ scriptService = null;
+ }
+
+ public void setTypes(String... types) {
+ this.types = types;
+ }
+
+ @Override
+ public void preProcess() {
+ }
+
+ @Override
+ public Query searchFilter(String[] types) {
+ return null;
+ }
+
+ @Override
+ public long id() {
+ return 0;
+ }
+
+ @Override
+ public String source() {
+ return null;
+ }
+
+ @Override
+ public ShardSearchRequest request() {
+ return null;
+ }
+
+ @Override
+ public SearchType searchType() {
+ return null;
+ }
+
+ @Override
+ public SearchContext searchType(SearchType searchType) {
+ return null;
+ }
+
+ @Override
+ public SearchShardTarget shardTarget() {
+ return null;
+ }
+
+ @Override
+ public int numberOfShards() {
+ return 1;
+ }
+
+ @Override
+ public boolean hasTypes() {
+ return false;
+ }
+
+ @Override
+ public String[] types() {
+ return new String[0];
+ }
+
+ @Override
+ public float queryBoost() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext queryBoost(float queryBoost) {
+ return null;
+ }
+
+ @Override
+ public long getOriginNanoTime() {
+ return originNanoTime;
+ }
+
+ @Override
+ protected long nowInMillisImpl() {
+ return 0;
+ }
+
+ @Override
+ public ScrollContext scrollContext() {
+ return null;
+ }
+
+ @Override
+ public SearchContext scrollContext(ScrollContext scrollContext) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public SearchContextAggregations aggregations() {
+ return aggregations;
+ }
+
+ @Override
+ public SearchContext aggregations(SearchContextAggregations aggregations) {
+ this.aggregations = aggregations;
+ return this;
+ }
+
+ @Override
+ public <SubPhaseContext extends FetchSubPhaseContext> SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory<SubPhaseContext> contextFactory) {
+ String subPhaseName = contextFactory.getName();
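+ // lazily create and cache one context instance per sub phase name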
+ if (subPhaseContexts.get(subPhaseName) == null) {
+ subPhaseContexts.put(subPhaseName, contextFactory.newContextInstance());
+ }
+ return (SubPhaseContext) subPhaseContexts.get(subPhaseName);
+ }
+
+ @Override
+ public SearchContextHighlight highlight() {
+ return null;
+ }
+
+ @Override
+ public void highlight(SearchContextHighlight highlight) {
+ }
+
+ @Override
+ public SuggestionSearchContext suggest() {
+ return null;
+ }
+
+ @Override
+ public void suggest(SuggestionSearchContext suggest) {
+ }
+
+ @Override
+ public List<RescoreSearchContext> rescore() {
+ return null;
+ }
+
+ @Override
+ public void addRescore(RescoreSearchContext rescore) {
+ }
+
+ @Override
+ public boolean hasScriptFields() {
+ return false;
+ }
+
+ @Override
+ public ScriptFieldsContext scriptFields() {
+ return null;
+ }
+
+ @Override
+ public boolean sourceRequested() {
+ return false;
+ }
+
+ @Override
+ public boolean hasFetchSourceContext() {
+ return false;
+ }
+
+ @Override
+ public FetchSourceContext fetchSourceContext() {
+ return null;
+ }
+
+ @Override
+ public SearchContext fetchSourceContext(FetchSourceContext fetchSourceContext) {
+ return null;
+ }
+
+ @Override
+ public ContextIndexSearcher searcher() {
+ return searcher;
+ }
+
+ public void setSearcher(Engine.Searcher searcher) {
+ this.searcher = new ContextIndexSearcher(searcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
+ }
+
+ @Override
+ public IndexShard indexShard() {
+ return indexShard;
+ }
+
+ @Override
+ public MapperService mapperService() {
+ if (indexService != null) {
+ return indexService.mapperService();
+ }
+ return null;
+ }
+
+ @Override
+ public AnalysisService analysisService() { return indexService.analysisService(); }
+
+ @Override
+ public SimilarityService similarityService() {
+ return null;
+ }
+
+ @Override
+ public ScriptService scriptService() {
+ return scriptService;
+ }
+
+ @Override
+ public PageCacheRecycler pageCacheRecycler() {
+ return pageCacheRecycler;
+ }
+
+ @Override
+ public BigArrays bigArrays() {
+ return bigArrays;
+ }
+
+ @Override
+ public BitsetFilterCache bitsetFilterCache() {
+ return fixedBitSetFilterCache;
+ }
+
+ @Override
+ public IndexFieldDataService fieldData() {
+ return indexFieldDataService;
+ }
+
+ @Override
+ public long timeoutInMillis() {
+ return 0;
+ }
+
+ @Override
+ public void timeoutInMillis(long timeoutInMillis) {
+ }
+
+ @Override
+ public int terminateAfter() {
+ return terminateAfter;
+ }
+
+ @Override
+ public void terminateAfter(int terminateAfter) {
+ this.terminateAfter = terminateAfter;
+ }
+
+ @Override
+ public SearchContext minimumScore(float minimumScore) {
+ this.minScore = minimumScore;
+ return this;
+ }
+
+ @Override
+ public Float minimumScore() {
+ return minScore;
+ }
+
+ @Override
+ public SearchContext sort(Sort sort) {
+ return null;
+ }
+
+ @Override
+ public Sort sort() {
+ return null;
+ }
+
+ @Override
+ public SearchContext trackScores(boolean trackScores) {
+ return null;
+ }
+
+ @Override
+ public boolean trackScores() {
+ return false;
+ }
+
+ @Override
+ public SearchContext parsedPostFilter(ParsedQuery postFilter) {
+ this.postFilter = postFilter;
+ return this;
+ }
+
+ @Override
+ public ParsedQuery parsedPostFilter() {
+ return postFilter;
+ }
+
+ @Override
+ public Query aliasFilter() {
+ return null;
+ }
+
+ @Override
+ public SearchContext parsedQuery(ParsedQuery query) {
+ this.originalQuery = query;
+ this.query = query.query();
+ return this;
+ }
+
+ @Override
+ public ParsedQuery parsedQuery() {
+ return originalQuery;
+ }
+
+ @Override
+ public Query query() {
+ return query;
+ }
+
+ @Override
+ public int from() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext from(int from) {
+ return null;
+ }
+
+ @Override
+ public int size() {
+ return size;
+ }
+
+ public void setSize(int size) {
+ this.size = size;
+ }
+
+
+ @Override
+ public SearchContext size(int size) {
+ return null;
+ }
+
+ @Override
+ public boolean hasFieldNames() {
+ return false;
+ }
+
+ @Override
+ public List<String> fieldNames() {
+ return null;
+ }
+
+ @Override
+ public void emptyFieldNames() {
+ }
+
+ @Override
+ public boolean explain() {
+ return false;
+ }
+
+ @Override
+ public void explain(boolean explain) {
+ }
+
+ @Override
+ public List<String> groupStats() {
+ return null;
+ }
+
+ @Override
+ public void groupStats(List<String> groupStats) {
+ }
+
+ @Override
+ public boolean version() {
+ return false;
+ }
+
+ @Override
+ public void version(boolean version) {
+ }
+
+ @Override
+ public int[] docIdsToLoad() {
+ return new int[0];
+ }
+
+ @Override
+ public int docIdsToLoadFrom() {
+ return 0;
+ }
+
+ @Override
+ public int docIdsToLoadSize() {
+ return 0;
+ }
+
+ @Override
+ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docIdsToLoadFrom, int docIdsToLoadSize) {
+ return null;
+ }
+
+ @Override
+ public void accessed(long accessTime) {
+ }
+
+ @Override
+ public long lastAccessTime() {
+ return 0;
+ }
+
+ @Override
+ public long keepAlive() {
+ return 0;
+ }
+
+ @Override
+ public void keepAlive(long keepAlive) {
+ }
+
+ @Override
+ public SearchLookup lookup() {
+ return new SearchLookup(mapperService(), fieldData(), null);
+ }
+
+ @Override
+ public DfsSearchResult dfsResult() {
+ return null;
+ }
+
+ @Override
+ public QuerySearchResult queryResult() {
+ return queryResult;
+ }
+
+ @Override
+ public FetchSearchResult fetchResult() {
+ return null;
+ }
+
+ @Override
+ public MappedFieldType smartNameFieldType(String name) {
+ if (mapperService() != null) {
+ return mapperService().smartNameFieldType(name, types());
+ }
+ return null;
+ }
+
+ @Override
+ public MappedFieldType smartNameFieldTypeFromAnyType(String name) {
+ if (mapperService() != null) {
+ return mapperService().smartNameFieldType(name);
+ }
+ return null;
+ }
+
+ @Override
+ public ObjectMapper getObjectMapper(String name) {
+ if (mapperService() != null) {
+ return mapperService().getObjectMapper(name, types);
+ }
+ return null;
+ }
+
+ @Override
+ public void doClose() {
+ }
+
+ @Override
+ public Counter timeEstimateCounter() {
+ return timeEstimateCounter;
+ }
+
+ @Override
+ public void innerHits(InnerHitsContext innerHitsContext) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public InnerHitsContext innerHits() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public <V> V putInContext(Object key, Object value) {
+ return null;
+ }
+
+ @Override
+ public void putAllInContext(ObjectObjectAssociativeContainer<Object, Object> map) {
+ }
+
+ @Override
+ public <V> V getFromContext(Object key) {
+ return null;
+ }
+
+ @Override
+ public <V> V getFromContext(Object key, V defaultValue) {
+ return defaultValue;
+ }
+
+ @Override
+ public boolean hasInContext(Object key) {
+ return false;
+ }
+
+ @Override
+ public int contextSize() {
+ return 0;
+ }
+
+ @Override
+ public boolean isContextEmpty() {
+ return true;
+ }
+
+ @Override
+ public ImmutableOpenMap<Object, Object> getContext() {
+ return ImmutableOpenMap.of();
+ }
+
+ @Override
+ public void copyContextFrom(HasContext other) {
+ }
+
+ @Override
+ public <V> void putHeader(String key, V value) {}
+
+ @Override
+ public <V> V getHeader(String key) {
+ return null;
+ }
+
+ @Override
+ public boolean hasHeader(String key) {
+ return false;
+ }
+
+ @Override
+ public Set<String> getHeaders() {
+ return Collections.emptySet();
+ }
+
+ @Override
+ public void copyHeadersFrom(HasHeaders from) {}
+
+ @Override
+ public void copyContextAndHeadersFrom(HasContextAndHeaders other) {}
+
+ @Override
+ public Profilers getProfilers() {
+ return null; // no profiling
+ }
+
+ @Override
+ public Map<Class<?>, Collector> queryCollectors() { return queryCollectors; }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
new file mode 100644
index 0000000000..93eef969b4
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.Version;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+/** Utilities for selecting versions in tests */
+public class VersionUtils {
+
+ private static final List<Version> SORTED_VERSIONS;
+ static {
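+ // collect the ids of all public static final Version constants via reflection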
+ Field[] declaredFields = Version.class.getFields();
+ Set<Integer> ids = new HashSet<>();
+ for (Field field : declaredFields) {
+ final int mod = field.getModifiers();
+ if (Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
+ if (field.getType() == Version.class) {
+ try {
+ Version object = (Version) field.get(null);
+ ids.add(object.id);
+ } catch (Throwable e) {
+ throw new RuntimeException(e);
+ }
+ }
+ }
+ }
+ List<Integer> idList = new ArrayList<>(ids);
+ Collections.sort(idList);
+ List<Version> version = new ArrayList<>();
+ for (Integer integer : idList) {
+ version.add(Version.fromId(integer));
+ }
+ SORTED_VERSIONS = Collections.unmodifiableList(version);
+ }
+
+ /** Returns immutable list of all known versions. */
+ public static List<Version> allVersions() {
+ return Collections.unmodifiableList(SORTED_VERSIONS);
+ }
+
+ public static Version getPreviousVersion(Version version) {
+ int index = SORTED_VERSIONS.indexOf(version);
+ assert index > 0;
+ return SORTED_VERSIONS.get(index - 1);
+ }
+
+ /** Returns the {@link Version} before the {@link Version#CURRENT} */
+ public static Version getPreviousVersion() {
+ Version version = getPreviousVersion(Version.CURRENT);
+ assert version.before(Version.CURRENT);
+ return version;
+ }
+
+ /** Returns the oldest {@link Version} */
+ public static Version getFirstVersion() {
+ return SORTED_VERSIONS.get(0);
+ }
+
+ /** Returns a random {@link Version} from all available versions. */
+ public static Version randomVersion(Random random) {
+ return SORTED_VERSIONS.get(random.nextInt(SORTED_VERSIONS.size()));
+ }
+
+ /** Returns a random {@link Version} between <code>minVersion</code> and <code>maxVersion</code> (inclusive). */
+ public static Version randomVersionBetween(Random random, Version minVersion, Version maxVersion) {
+ int minVersionIndex = 0;
+ if (minVersion != null) {
+ minVersionIndex = SORTED_VERSIONS.indexOf(minVersion);
+ }
+ int maxVersionIndex = SORTED_VERSIONS.size() - 1;
+ if (maxVersion != null) {
+ maxVersionIndex = SORTED_VERSIONS.indexOf(maxVersion);
+ }
+ if (minVersionIndex == -1) {
+ throw new IllegalArgumentException("minVersion [" + minVersion + "] does not exist.");
+ } else if (maxVersionIndex == -1) {
+ throw new IllegalArgumentException("maxVersion [" + maxVersion + "] does not exist.");
+ } else if (minVersionIndex > maxVersionIndex) {
+ throw new IllegalArgumentException("maxVersion [" + maxVersion + "] cannot be less than minVersion [" + minVersion + "]");
+ } else {
+ // minVersionIndex is inclusive so need to add 1 to this index
+ int range = maxVersionIndex + 1 - minVersionIndex;
+ return SORTED_VERSIONS.get(minVersionIndex + random.nextInt(range));
+ }
+ }
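+
+    /* Usage sketch (illustrative, not part of this change): a test can pick a random
+     * version within a compatibility range, e.g.
+     *
+     *   Version v = VersionUtils.randomVersionBetween(random, VersionUtils.getFirstVersion(), Version.CURRENT);
+     *
+     * Passing null for either bound falls back to the oldest/newest known version.
+     */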
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java
new file mode 100644
index 0000000000..866a19e0a7
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentHelper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
+
+public final class XContentTestUtils {
+ private XContentTestUtils() {
+
+ }
+
+ public static Map<String, Object> convertToMap(ToXContent part) throws IOException {
+ XContentBuilder builder = XContentFactory.jsonBuilder();
+ builder.startObject();
+ part.toXContent(builder, EMPTY_PARAMS);
+ builder.endObject();
+ return XContentHelper.convertToMap(builder.bytes(), false).v2();
+ }
+
+
+ /**
+     * Compares two maps generated from XContent objects. The order of elements in arrays is ignored.
+     *
+     * @return null if the maps are equal, or the path to the first element where a difference was found
+ */
+ public static String differenceBetweenMapsIgnoringArrayOrder(Map<String, Object> first, Map<String, Object> second) {
+ return differenceBetweenMapsIgnoringArrayOrder("", first, second);
+ }
+
+ private static String differenceBetweenMapsIgnoringArrayOrder(String path, Map<String, Object> first, Map<String, Object> second) {
+ if (first.size() != second.size()) {
+ return path + ": sizes of the maps don't match: " + first.size() + " != " + second.size();
+ }
+
+ for (String key : first.keySet()) {
+ String reason = differenceBetweenObjectsIgnoringArrayOrder(path + "/" + key, first.get(key), second.get(key));
+ if (reason != null) {
+ return reason;
+ }
+ }
+ return null;
+ }
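+
+    /* Usage sketch (illustrative): given two ToXContent fragments `expected` and `actual`,
+     * a test can assert order-insensitive equality along the lines of
+     *
+     *   String diff = differenceBetweenMapsIgnoringArrayOrder(convertToMap(expected), convertToMap(actual));
+     *   assertNull(diff, diff); // diff is the path of the first mismatch, or null if the maps match
+     */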
+
+ @SuppressWarnings("unchecked")
+ private static String differenceBetweenObjectsIgnoringArrayOrder(String path, Object first, Object second) {
+ if (first == null) {
+ if (second == null) {
+ return null;
+ } else {
+ return path + ": first element is null, the second element is not null";
+ }
+ } else if (first instanceof List) {
+ if (second instanceof List) {
+ List<Object> secondList = new ArrayList<>((List<Object>) second);
+ List<Object> firstList = (List<Object>) first;
+ if (firstList.size() == secondList.size()) {
+ String reason = path + ": no matches found";
+ for (Object firstObj : firstList) {
+ boolean found = false;
+ for (Object secondObj : secondList) {
+ reason = differenceBetweenObjectsIgnoringArrayOrder(path + "/*", firstObj, secondObj);
+ if (reason == null) {
+ secondList.remove(secondObj);
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ return reason;
+ }
+ }
+ if (secondList.isEmpty()) {
+ return null;
+ } else {
+ return path + ": the second list is not empty";
+ }
+ } else {
+ return path + ": sizes of the arrays don't match: " + firstList.size() + " != " + secondList.size();
+ }
+ } else {
+ return path + ": the second element is not an array";
+ }
+ } else if (first instanceof Map) {
+ if (second instanceof Map) {
+ return differenceBetweenMapsIgnoringArrayOrder(path, (Map<String, Object>) first, (Map<String, Object>) second);
+ } else {
+ return path + ": the second element is not a map";
+ }
+ } else {
+ if (first.equals(second)) {
+ return null;
+ } else {
+ return path + ": the elements don't match: [" + first + "] != [" + second + "]";
+ }
+
+ }
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java
new file mode 100644
index 0000000000..5814cac131
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.FilterClient;
+import org.elasticsearch.cluster.routing.Preference;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.Random;
+
+/** A {@link Client} that randomizes request parameters. */
+public class RandomizingClient extends FilterClient {
+
+ private final SearchType defaultSearchType;
+ private final String defaultPreference;
+
+
+ public RandomizingClient(Client client, Random random) {
+ super(client);
+        // we don't use the QUERY_AND_FETCH types since they break quite a lot of tests
+        // by returning `size * num_shards` hits instead of `size`
+ defaultSearchType = RandomPicks.randomFrom(random, Arrays.asList(
+ SearchType.DFS_QUERY_THEN_FETCH,
+ SearchType.QUERY_THEN_FETCH));
+ if (random.nextInt(10) == 0) {
+ defaultPreference = RandomPicks.randomFrom(random, EnumSet.of(Preference.PRIMARY_FIRST, Preference.LOCAL)).type();
+ } else if (random.nextInt(10) == 0) {
+ String s = TestUtil.randomRealisticUnicodeString(random, 1, 10);
+ defaultPreference = s.startsWith("_") ? null : s; // '_' is a reserved character
+ } else {
+ defaultPreference = null;
+ }
+
+ }
+
+ @Override
+ public SearchRequestBuilder prepareSearch(String... indices) {
+ return in.prepareSearch(indices).setSearchType(defaultSearchType).setPreference(defaultPreference);
+ }
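+
+    /* Usage sketch (illustrative): the test framework can hand tests a wrapped client, e.g.
+     *
+     *   Client client = new RandomizingClient(internalCluster().client(), random());
+     *
+     * so that every prepareSearch() call picks up the randomized search type and preference.
+     */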
+
+ @Override
+ public String toString() {
+ return "randomized(" + super.toString() + ")";
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
new file mode 100644
index 0000000000..cb3d643f55
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.cluster;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.List;
+
+public class NoopClusterService implements ClusterService {
+
+ final ClusterState state;
+
+ public NoopClusterService() {
+ this(ClusterState.builder(new ClusterName("noop")).build());
+ }
+
+ public NoopClusterService(ClusterState state) {
+ if (state.getNodes().size() == 0) {
+ state = ClusterState.builder(state).nodes(
+ DiscoveryNodes.builder()
+ .put(new DiscoveryNode("noop_id", DummyTransportAddress.INSTANCE, Version.CURRENT))
+ .localNodeId("noop_id")).build();
+ }
+
+ assert state.getNodes().localNode() != null;
+ this.state = state;
+
+ }
+
+ @Override
+ public DiscoveryNode localNode() {
+ return state.getNodes().localNode();
+ }
+
+ @Override
+ public ClusterState state() {
+ return state;
+ }
+
+ @Override
+ public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+
+ }
+
+ @Override
+ public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+
+ }
+
+ @Override
+ public OperationRouting operationRouting() {
+ return null;
+ }
+
+ @Override
+ public void addFirst(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void addLast(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void add(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void remove(ClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void add(LocalNodeMasterListener listener) {
+
+ }
+
+ @Override
+ public void remove(LocalNodeMasterListener listener) {
+
+ }
+
+ @Override
+ public void add(TimeValue timeout, TimeoutClusterStateListener listener) {
+
+ }
+
+ @Override
+ public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
+
+ }
+
+ @Override
+ public <T> void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
+
+ }
+
+ @Override
+ public List<PendingClusterTask> pendingTasks() {
+ return null;
+ }
+
+ @Override
+ public int numberOfPendingTasks() {
+ return 0;
+ }
+
+ @Override
+ public TimeValue getMaxTaskWaitTime() {
+ return TimeValue.timeValueMillis(0);
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return null;
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public ClusterService start() {
+ return null;
+ }
+
+ @Override
+ public ClusterService stop() {
+ return null;
+ }
+
+ @Override
+ public void close() {
+
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
new file mode 100644
index 0000000000..5dc8cce99c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.cluster;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
+import org.elasticsearch.cluster.service.PendingClusterTask;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.ScheduledFuture;
+
+/** A class that simulates simple cluster service features, like state storage and listeners. */
+public class TestClusterService implements ClusterService {
+
+ volatile ClusterState state;
+ private final List<ClusterStateListener> listeners = new CopyOnWriteArrayList<>();
+ private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
+ private final ThreadPool threadPool;
+ private final ESLogger logger = Loggers.getLogger(getClass(), Settings.EMPTY);
+ private final OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider());
+
+ public TestClusterService() {
+ this(ClusterState.builder(new ClusterName("test")).build());
+ }
+
+ public TestClusterService(ThreadPool threadPool) {
+ this(ClusterState.builder(new ClusterName("test")).build(), threadPool);
+ }
+
+ public TestClusterService(ClusterState state) {
+ this(state, null);
+ }
+
+ public TestClusterService(ClusterState state, @Nullable ThreadPool threadPool) {
+ if (state.getNodes().size() == 0) {
+ state = ClusterState.builder(state).nodes(
+ DiscoveryNodes.builder()
+ .put(new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, Version.CURRENT))
+ .localNodeId("test_node")).build();
+ }
+
+ assert state.getNodes().localNode() != null;
+ this.state = state;
+ this.threadPool = threadPool;
+
+ }
+
+
+    /** Sets the current state and notifies any registered listeners of the change, mimicking an update task. */
+    public synchronized ClusterState setState(ClusterState state) {
+ assert state.getNodes().localNode() != null;
+ // make sure we have a version increment
+ state = ClusterState.builder(state).version(this.state.version() + 1).build();
+ return setStateAndNotifyListeners(state);
+ }
+
+ private ClusterState setStateAndNotifyListeners(ClusterState state) {
+ ClusterChangedEvent event = new ClusterChangedEvent("test", state, this.state);
+ this.state = state;
+ for (ClusterStateListener listener : listeners) {
+ listener.clusterChanged(event);
+ }
+ return state;
+ }
+
+    /** Sets the current state and notifies any registered listeners of the change. */
+ public ClusterState setState(ClusterState.Builder state) {
+ return setState(state.build());
+ }
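+
+    /* Usage sketch (illustrative): unit tests can drive state changes without a real cluster, e.g.
+     *
+     *   TestClusterService clusterService = new TestClusterService();
+     *   clusterService.add(event -> latch.countDown()); // `latch` is a hypothetical CountDownLatch
+     *   clusterService.setState(ClusterState.builder(clusterService.state()));
+     */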
+
+ @Override
+ public DiscoveryNode localNode() {
+ return state.getNodes().localNode();
+ }
+
+ @Override
+ public ClusterState state() {
+ return state;
+ }
+
+ @Override
+ public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+ throw new UnsupportedOperationException();
+
+ }
+
+ @Override
+ public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
+ throw new UnsupportedOperationException();
+
+ }
+
+ @Override
+ public OperationRouting operationRouting() {
+ return operationRouting;
+ }
+
+ @Override
+ public void addFirst(ClusterStateListener listener) {
+ listeners.add(0, listener);
+ }
+
+ @Override
+ public void addLast(ClusterStateListener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void add(ClusterStateListener listener) {
+ listeners.add(listener);
+ }
+
+ @Override
+ public void remove(ClusterStateListener listener) {
+ listeners.remove(listener);
+ for (Iterator<NotifyTimeout> it = onGoingTimeouts.iterator(); it.hasNext(); ) {
+ NotifyTimeout timeout = it.next();
+ if (timeout.listener.equals(listener)) {
+ timeout.cancel();
+ it.remove();
+ }
+ }
+ }
+
+ @Override
+ public void add(LocalNodeMasterListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void remove(LocalNodeMasterListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) {
+ if (threadPool == null) {
+ throw new UnsupportedOperationException("TestClusterService wasn't initialized with a thread pool");
+ }
+ NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout);
+ notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
+ onGoingTimeouts.add(notifyTimeout);
+ listeners.add(listener);
+ listener.postAdded();
+ }
+
+ @Override
+ public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) {
+ submitStateUpdateTask(source, null, updateTask, updateTask, updateTask);
+ }
+
+ @Override
+    public synchronized <T> void submitStateUpdateTask(String source, T task, ClusterStateTaskConfig config, ClusterStateTaskExecutor<T> executor, ClusterStateTaskListener listener) {
+ logger.debug("processing [{}]", source);
+ if (state().nodes().localNodeMaster() == false && executor.runOnlyOnMaster()) {
+ listener.onNoLongerMaster(source);
+ logger.debug("failed [{}], no longer master", source);
+ return;
+ }
+ ClusterStateTaskExecutor.BatchResult<T> batchResult;
+ ClusterState previousClusterState = state;
+ try {
+ batchResult = executor.execute(previousClusterState, Arrays.asList(task));
+ } catch (Exception e) {
+ batchResult = ClusterStateTaskExecutor.BatchResult.<T>builder().failure(task, e).build(previousClusterState);
+ }
+
+ batchResult.executionResults.get(task).handle(
+ () -> {},
+ ex -> listener.onFailure(source, new ElasticsearchException("failed to process cluster state update task [" + source + "]", ex))
+ );
+
+ setStateAndNotifyListeners(batchResult.resultingState);
+ listener.clusterStateProcessed(source, previousClusterState, batchResult.resultingState);
+ logger.debug("finished [{}]", source);
+
+ }
+
+ @Override
+ public TimeValue getMaxTaskWaitTime() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<PendingClusterTask> pendingTasks() {
+ throw new UnsupportedOperationException();
+
+ }
+
+ @Override
+ public int numberOfPendingTasks() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterService start() throws ElasticsearchException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ClusterService stop() throws ElasticsearchException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public void close() throws ElasticsearchException {
+ throw new UnsupportedOperationException();
+ }
+
+ class NotifyTimeout implements Runnable {
+ final TimeoutClusterStateListener listener;
+ final TimeValue timeout;
+ volatile ScheduledFuture future;
+
+ NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) {
+ this.listener = listener;
+ this.timeout = timeout;
+ }
+
+ public void cancel() {
+ FutureUtils.cancel(future);
+ }
+
+ @Override
+ public void run() {
+ if (future != null && future.isCancelled()) {
+ return;
+ }
+ listener.onTimeout(this.timeout);
+ // note, we rely on the listener to remove itself in case of timeout if needed
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
new file mode 100644
index 0000000000..484f65ea65
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.discovery;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.NodeConfigurationSource;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+public class ClusterDiscoveryConfiguration extends NodeConfigurationSource {
+
+    static final Settings DEFAULT_NODE_SETTINGS = Settings.settingsBuilder().put("discovery.type", "zen").build();
+ private static final String IP_ADDR = "127.0.0.1";
+
+ final int numOfNodes;
+ final Settings nodeSettings;
+ final Settings transportClientSettings;
+
+ public ClusterDiscoveryConfiguration(int numOfNodes, Settings extraSettings) {
+ this.numOfNodes = numOfNodes;
+ this.nodeSettings = Settings.builder().put(DEFAULT_NODE_SETTINGS).put(extraSettings).build();
+ this.transportClientSettings = Settings.builder().put(extraSettings).build();
+ }
+
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ return nodeSettings;
+ }
+
+ @Override
+ public Settings transportClientSettings() {
+ return transportClientSettings;
+ }
+
+ public static class UnicastZen extends ClusterDiscoveryConfiguration {
+
+ // this variable is incremented on each bind attempt and will maintain the next port that should be tried
+ private static int nextPort = calcBasePort();
+
+ private final int[] unicastHostOrdinals;
+ private final int[] unicastHostPorts;
+
+ public UnicastZen(int numOfNodes, Settings extraSettings) {
+ this(numOfNodes, numOfNodes, extraSettings);
+ }
+
+ public UnicastZen(int numOfNodes, int numOfUnicastHosts, Settings extraSettings) {
+ super(numOfNodes, extraSettings);
+ if (numOfUnicastHosts == numOfNodes) {
+ unicastHostOrdinals = new int[numOfNodes];
+ for (int i = 0; i < numOfNodes; i++) {
+ unicastHostOrdinals[i] = i;
+ }
+ } else {
+ Set<Integer> ordinals = new HashSet<>(numOfUnicastHosts);
+ while (ordinals.size() != numOfUnicastHosts) {
+ ordinals.add(RandomizedTest.randomInt(numOfNodes - 1));
+ }
+ unicastHostOrdinals = CollectionUtils.toArray(ordinals);
+ }
+ this.unicastHostPorts = unicastHostPorts(numOfNodes);
+ assert unicastHostOrdinals.length <= unicastHostPorts.length;
+ }
+
+ public UnicastZen(int numOfNodes, int[] unicastHostOrdinals) {
+ this(numOfNodes, Settings.EMPTY, unicastHostOrdinals);
+ }
+
+ public UnicastZen(int numOfNodes, Settings extraSettings, int[] unicastHostOrdinals) {
+ super(numOfNodes, extraSettings);
+ this.unicastHostOrdinals = unicastHostOrdinals;
+ this.unicastHostPorts = unicastHostPorts(numOfNodes);
+ assert unicastHostOrdinals.length <= unicastHostPorts.length;
+ }
+
+ private static int calcBasePort() {
+ return 30000 + InternalTestCluster.JVM_BASE_PORT_OFFEST;
+ }
+
+ @Override
+ public Settings nodeSettings(int nodeOrdinal) {
+ Settings.Builder builder = Settings.builder();
+
+ String[] unicastHosts = new String[unicastHostOrdinals.length];
+ if (nodeOrdinal >= unicastHostPorts.length) {
+ throw new ElasticsearchException("nodeOrdinal [" + nodeOrdinal + "] is greater than the number unicast ports [" + unicastHostPorts.length + "]");
+ } else {
+ // we need to pin the node port & host so we'd know where to point things
+ builder.put("transport.tcp.port", unicastHostPorts[nodeOrdinal]);
+ builder.put("transport.host", IP_ADDR); // only bind on one IF we use v4 here by default
+ builder.put("transport.bind_host", IP_ADDR);
+ builder.put("transport.publish_host", IP_ADDR);
+ builder.put("http.enabled", false);
+ for (int i = 0; i < unicastHostOrdinals.length; i++) {
+ unicastHosts[i] = IP_ADDR + ":" + (unicastHostPorts[unicastHostOrdinals[i]]);
+ }
+ }
+ builder.putArray("discovery.zen.ping.unicast.hosts", unicastHosts);
+ return builder.put(super.nodeSettings(nodeOrdinal)).build();
+ }
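+
+        /* Illustrative result for nodeOrdinal 0 with two unicast host ordinals (actual ports
+         * depend on what unicastHostPorts() managed to bind):
+         *   transport.tcp.port: <port0>
+         *   transport.host / bind_host / publish_host: 127.0.0.1
+         *   discovery.zen.ping.unicast.hosts: ["127.0.0.1:<port0>", "127.0.0.1:<port1>"]
+         */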
+
+ @SuppressForbidden(reason = "we know we pass a IP address")
+ protected synchronized static int[] unicastHostPorts(int numHosts) {
+ int[] unicastHostPorts = new int[numHosts];
+
+ final int basePort = calcBasePort();
+ final int maxPort = basePort + InternalTestCluster.PORTS_PER_JVM;
+ int tries = 0;
+ for (int i = 0; i < unicastHostPorts.length; i++) {
+ boolean foundPortInRange = false;
+ while (tries < InternalTestCluster.PORTS_PER_JVM && !foundPortInRange) {
+ try (ServerSocket serverSocket = new ServerSocket()) {
+ // Set SO_REUSEADDR as we may bind here and not be able to reuse the address immediately without it.
+ serverSocket.setReuseAddress(NetworkUtils.defaultReuseAddress());
+ serverSocket.bind(new InetSocketAddress(IP_ADDR, nextPort));
+ // bind was a success
+ foundPortInRange = true;
+ unicastHostPorts[i] = nextPort;
+ } catch (IOException e) {
+ // Do nothing
+ }
+
+ nextPort++;
+ if (nextPort >= maxPort) {
+ // Roll back to the beginning of the range and do not go into another JVM's port range
+ nextPort = basePort;
+ }
+ tries++;
+ }
+
+ if (!foundPortInRange) {
+ throw new ElasticsearchException("could not find enough open ports in range [" + basePort + "-" + maxPort + "]. required [" + unicastHostPorts.length + "] ports");
+ }
+ }
+ return unicastHostPorts;
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
new file mode 100644
index 0000000000..e318843e84
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class BlockClusterStateProcessing extends SingleNodeDisruption {
+
+ AtomicReference<CountDownLatch> disruptionLatch = new AtomicReference<>();
+
+
+ public BlockClusterStateProcessing(Random random) {
+ this(null, random);
+ }
+
+ public BlockClusterStateProcessing(String disruptedNode, Random random) {
+ super(random);
+ this.disruptedNode = disruptedNode;
+ }
+
+
+ @Override
+ public void startDisrupting() {
+ final String disruptionNodeCopy = disruptedNode;
+ if (disruptionNodeCopy == null) {
+ return;
+ }
+ ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
+ if (clusterService == null) {
+ return;
+ }
+ logger.info("delaying cluster state updates on node [{}]", disruptionNodeCopy);
+ boolean success = disruptionLatch.compareAndSet(null, new CountDownLatch(1));
+ assert success : "startDisrupting called without waiting on stopDistrupting to complete";
+ final CountDownLatch started = new CountDownLatch(1);
+ clusterService.submitStateUpdateTask("service_disruption_block", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
+
+ @Override
+ public boolean runOnlyOnMaster() {
+ return false;
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ started.countDown();
+ CountDownLatch latch = disruptionLatch.get();
+ if (latch != null) {
+ latch.await();
+ }
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ logger.error("unexpected error during disruption", t);
+ }
+ });
+ try {
+ started.await();
+        } catch (InterruptedException e) {
+            // ignore; we only wait for the blocking task to start executing
+        }
+ }
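+
+    /* Usage sketch (illustrative): in an integration test one might do
+     *
+     *   BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(nodeName, random());
+     *   internalCluster().setDisruptionScheme(disruption);
+     *   disruption.startDisrupting();
+     *   // ... provoke cluster state updates that should now queue up ...
+     *   disruption.stopDisrupting();
+     */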
+
+ @Override
+ public void stopDisrupting() {
+ CountDownLatch latch = disruptionLatch.get();
+ if (latch != null) {
+ latch.countDown();
+ }
+
+ }
+
+ @Override
+ public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+ removeFromCluster(cluster);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMinutes(0);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
new file mode 100644
index 0000000000..d957220c6d
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Simulates irregular long GC pauses.
+ */
+public class IntermittentLongGCDisruption extends LongGCDisruption {
+
+ volatile boolean disrupting;
+ volatile Thread worker;
+
+ final long intervalBetweenDelaysMin;
+ final long intervalBetweenDelaysMax;
+ final long delayDurationMin;
+ final long delayDurationMax;
+
+
+ public IntermittentLongGCDisruption(Random random) {
+ this(null, random);
+ }
+
+ public IntermittentLongGCDisruption(String disruptedNode, Random random) {
+ this(disruptedNode, random, 100, 200, 300, 20000);
+ }
+
+ public IntermittentLongGCDisruption(String disruptedNode, Random random, long intervalBetweenDelaysMin,
+ long intervalBetweenDelaysMax, long delayDurationMin, long delayDurationMax) {
+ this(random, disruptedNode, intervalBetweenDelaysMin, intervalBetweenDelaysMax, delayDurationMin, delayDurationMax);
+ }
+
+ public IntermittentLongGCDisruption(Random random, String disruptedNode, long intervalBetweenDelaysMin, long intervalBetweenDelaysMax,
+ long delayDurationMin, long delayDurationMax) {
+ super(random, disruptedNode);
+ this.intervalBetweenDelaysMin = intervalBetweenDelaysMin;
+ this.intervalBetweenDelaysMax = intervalBetweenDelaysMax;
+ this.delayDurationMin = delayDurationMin;
+ this.delayDurationMax = delayDurationMax;
+ }
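+
+    // note: all interval/duration parameters above are in milliseconds (they feed TimeValue and Thread.sleep)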
+
+    static final AtomicInteger threadIds = new AtomicInteger();
+
+ @Override
+ public void startDisrupting() {
+ disrupting = true;
+ worker = new Thread(new BackgroundWorker(), "long_gc_simulation_" + thread_ids.incrementAndGet());
+ worker.setDaemon(true);
+ worker.start();
+ }
+
+ @Override
+ public void stopDisrupting() {
+ if (worker == null) {
+ return;
+ }
+ logger.info("stopping long GCs on [{}]", disruptedNode);
+ disrupting = false;
+ worker.interrupt();
+ try {
+ worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax));
+ } catch (InterruptedException e) {
+ logger.info("background thread failed to stop");
+ }
+ worker = null;
+ }
+
+ private void simulateLongGC(final TimeValue duration) throws InterruptedException {
+ final String disruptionNodeCopy = disruptedNode;
+ if (disruptionNodeCopy == null) {
+ return;
+ }
+ logger.info("node [{}] goes into GC for for [{}]", disruptionNodeCopy, duration);
+ final Set<Thread> nodeThreads = new HashSet<>();
+ try {
+ while (stopNodeThreads(disruptionNodeCopy, nodeThreads)) ;
+ if (!nodeThreads.isEmpty()) {
+ Thread.sleep(duration.millis());
+ }
+ } finally {
+ logger.info("node [{}] resumes from GC", disruptionNodeCopy);
+ resumeThreads(nodeThreads);
+ }
+ }
+
+ class BackgroundWorker implements Runnable {
+
+ @Override
+ public void run() {
+ while (disrupting && disruptedNode != null) {
+ try {
+ TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin)));
+ simulateLongGC(duration);
+
+ duration = new TimeValue(intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)));
+ if (disrupting && disruptedNode != null) {
+ Thread.sleep(duration.millis());
+ }
+                } catch (InterruptedException e) {
+                    // expected when stopDisrupting() interrupts the worker; the loop re-checks the disrupting flag
+ } catch (Exception e) {
+ logger.error("error in background worker", e);
+ }
+ }
+ }
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
new file mode 100644
index 0000000000..1e8dcb10db
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+import java.util.regex.Pattern;
+
+/**
+ * Suspends all threads on the specified node in order to simulate a long gc.
+ */
+public class LongGCDisruption extends SingleNodeDisruption {
+
+    private static final Pattern[] unsafeClasses = new Pattern[]{
+ // logging has shared JVM locks - we may suspend a thread and block other nodes from doing their thing
+ Pattern.compile("Logger")
+ };
+
+ protected final String disruptedNode;
+ private Set<Thread> suspendedThreads;
+
+ public LongGCDisruption(Random random, String disruptedNode) {
+ super(random);
+ this.disruptedNode = disruptedNode;
+ }
+
+ @Override
+ public synchronized void startDisrupting() {
+ if (suspendedThreads == null) {
+ suspendedThreads = new HashSet<>();
+ stopNodeThreads(disruptedNode, suspendedThreads);
+ } else {
+ throw new IllegalStateException("can't disrupt twice, call stopDisrupting() first");
+ }
+ }
+
+ @Override
+ public synchronized void stopDisrupting() {
+ if (suspendedThreads != null) {
+ resumeThreads(suspendedThreads);
+ suspendedThreads = null;
+ }
+ }
+
+ @Override
+ public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+ removeFromCluster(cluster);
+ ensureNodeCount(cluster);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMillis(0);
+ }
+
+ @SuppressForbidden(reason = "stops/resumes threads intentionally")
+ protected boolean stopNodeThreads(String node, Set<Thread> nodeThreads) {
+ Thread[] allThreads = null;
+ while (allThreads == null) {
+ allThreads = new Thread[Thread.activeCount()];
+ if (Thread.enumerate(allThreads) > allThreads.length) {
+ // we didn't make enough space, retry
+ allThreads = null;
+ }
+ }
+ boolean stopped = false;
+ final String nodeThreadNamePart = "[" + node + "]";
+ for (Thread thread : allThreads) {
+ if (thread == null) {
+ continue;
+ }
+ String name = thread.getName();
+ if (name.contains(nodeThreadNamePart)) {
+ if (thread.isAlive() && nodeThreads.add(thread)) {
+ stopped = true;
+ thread.suspend();
+                    // double-check the thread is not in a shared resource like logging; if so, resume it and come back later
+ boolean safe = true;
+ safe:
+ for (StackTraceElement stackElement : thread.getStackTrace()) {
+ String className = stackElement.getClassName();
+ for (Pattern unsafePattern : unsafeClasses) {
+ if (unsafePattern.matcher(className).find()) {
+ safe = false;
+ break safe;
+ }
+ }
+ }
+ if (!safe) {
+ thread.resume();
+ nodeThreads.remove(thread);
+ }
+ }
+ }
+ }
+ return stopped;
+ }
+
+ @SuppressForbidden(reason = "stops/resumes threads intentionally")
+ protected void resumeThreads(Set<Thread> threads) {
+ for (Thread thread : threads) {
+ thread.resume();
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
new file mode 100644
index 0000000000..c422b04272
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.Random;
+import java.util.Set;
+
+public class NetworkDelaysPartition extends NetworkPartition {
+
+    static final long DEFAULT_DELAY_MIN = 10000;
+    static final long DEFAULT_DELAY_MAX = 90000;
+
+
+ final long delayMin;
+ final long delayMax;
+
+ TimeValue duration;
+
+ public NetworkDelaysPartition(Random random) {
+ this(random, DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX);
+ }
+
+ public NetworkDelaysPartition(Random random, long delayMin, long delayMax) {
+ super(random);
+ this.delayMin = delayMin;
+ this.delayMax = delayMax;
+ }
+
+ public NetworkDelaysPartition(String node1, String node2, Random random) {
+ this(node1, node2, DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX, random);
+ }
+
+ public NetworkDelaysPartition(String node1, String node2, long delayMin, long delayMax, Random random) {
+ super(node1, node2, random);
+ this.delayMin = delayMin;
+ this.delayMax = delayMax;
+ }
+
+ public NetworkDelaysPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ this(nodesSideOne, nodesSideTwo, DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX, random);
+ }
+
+ public NetworkDelaysPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, long delay, Random random) {
+ this(nodesSideOne, nodesSideTwo, delay, delay, random);
+ }
+
+ public NetworkDelaysPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, long delayMin, long delayMax, Random random) {
+ super(nodesSideOne, nodesSideTwo, random);
+ this.delayMin = delayMin;
+ this.delayMax = delayMax;
+
+ }
+
+ @Override
+ public synchronized void startDisrupting() {
+ duration = new TimeValue(delayMin == delayMax ? delayMin : delayMin + random.nextInt((int) (delayMax - delayMin)));
+ super.startDisrupting();
+ }
+
+ @Override
+ void applyDisruption(MockTransportService transportService1, MockTransportService transportService2) {
+        transportService1.addUnresponsiveRule(transportService2, duration);
+        transportService2.addUnresponsiveRule(transportService1, duration);
+ }
+
+ @Override
+ protected String getPartitionDescription() {
+ return "network delays for [" + duration + "]";
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMillis(delayMax);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
new file mode 100644
index 0000000000..ed0aa17cfc
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.Random;
+import java.util.Set;
+
+public class NetworkDisconnectPartition extends NetworkPartition {
+
+
+ public NetworkDisconnectPartition(Random random) {
+ super(random);
+ }
+
+ public NetworkDisconnectPartition(String node1, String node2, Random random) {
+ super(node1, node2, random);
+ }
+
+ public NetworkDisconnectPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ super(nodesSideOne, nodesSideTwo, random);
+ }
+
+ @Override
+ protected String getPartitionDescription() {
+ return "disconnected";
+ }
+
+ @Override
+ void applyDisruption(MockTransportService transportService1, MockTransportService transportService2) {
+ transportService1.addFailToSendNoConnectRule(transportService2);
+ transportService2.addFailToSendNoConnectRule(transportService1);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueSeconds(0);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java
new file mode 100644
index 0000000000..9a65fc579f
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import static org.junit.Assert.assertFalse;
+
+public abstract class NetworkPartition implements ServiceDisruptionScheme {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ final Set<String> nodesSideOne;
+ final Set<String> nodesSideTwo;
+ volatile boolean autoExpand;
+ protected final Random random;
+ protected volatile InternalTestCluster cluster;
+ protected volatile boolean activeDisruption = false;
+
+
+ public NetworkPartition(Random random) {
+ this.random = new Random(random.nextLong());
+ nodesSideOne = new HashSet<>();
+ nodesSideTwo = new HashSet<>();
+ autoExpand = true;
+ }
+
+ public NetworkPartition(String node1, String node2, Random random) {
+ this(random);
+ nodesSideOne.add(node1);
+ nodesSideTwo.add(node2);
+ autoExpand = false;
+ }
+
+ public NetworkPartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ this(random);
+ this.nodesSideOne.addAll(nodesSideOne);
+ this.nodesSideTwo.addAll(nodesSideTwo);
+ autoExpand = false;
+ }
+
+
+ public Collection<String> getNodesSideOne() {
+ return Collections.unmodifiableCollection(nodesSideOne);
+ }
+
+ public Collection<String> getNodesSideTwo() {
+ return Collections.unmodifiableCollection(nodesSideTwo);
+ }
+
+ public Collection<String> getMajoritySide() {
+ if (nodesSideOne.size() >= nodesSideTwo.size()) {
+ return getNodesSideOne();
+ } else {
+ return getNodesSideTwo();
+ }
+ }
+
+ public Collection<String> getMinoritySide() {
+ if (nodesSideOne.size() >= nodesSideTwo.size()) {
+ return getNodesSideTwo();
+ } else {
+ return getNodesSideOne();
+ }
+ }
+
+ @Override
+ public void applyToCluster(InternalTestCluster cluster) {
+ this.cluster = cluster;
+ if (autoExpand) {
+ for (String node : cluster.getNodeNames()) {
+ applyToNode(node, cluster);
+ }
+ }
+ }
+
+ @Override
+ public void removeFromCluster(InternalTestCluster cluster) {
+ stopDisrupting();
+ }
+
+ @Override
+ public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+ removeFromCluster(cluster);
+ ensureNodeCount(cluster);
+ }
+
+ protected void ensureNodeCount(InternalTestCluster cluster) {
+ assertFalse("cluster failed to form after disruption was healed", cluster.client().admin().cluster().prepareHealth()
+ .setWaitForNodes("" + cluster.size())
+ .setWaitForRelocatingShards(0)
+ .get().isTimedOut());
+ }
+
+ @Override
+ public synchronized void applyToNode(String node, InternalTestCluster cluster) {
+ if (!autoExpand || nodesSideOne.contains(node) || nodesSideTwo.contains(node)) {
+ return;
+ }
+ if (nodesSideOne.isEmpty()) {
+ nodesSideOne.add(node);
+ } else if (nodesSideTwo.isEmpty()) {
+ nodesSideTwo.add(node);
+ } else if (random.nextBoolean()) {
+ nodesSideOne.add(node);
+ } else {
+ nodesSideTwo.add(node);
+ }
+ }
+
+ @Override
+ public synchronized void removeFromNode(String node, InternalTestCluster cluster) {
+ MockTransportService transportService = (MockTransportService) cluster.getInstance(TransportService.class, node);
+ Set<String> otherSideNodes;
+ if (nodesSideOne.contains(node)) {
+ otherSideNodes = nodesSideTwo;
+ nodesSideOne.remove(node);
+ } else if (nodesSideTwo.contains(node)) {
+ otherSideNodes = nodesSideOne;
+ nodesSideTwo.remove(node);
+ } else {
+ return;
+ }
+ for (String node2 : otherSideNodes) {
+ MockTransportService transportService2 = (MockTransportService) cluster.getInstance(TransportService.class, node2);
+ removeDisruption(transportService, transportService2);
+ }
+ }
+
+ @Override
+ public synchronized void testClusterClosed() {
+
+ }
+
+ protected abstract String getPartitionDescription();
+
+ @Override
+ public synchronized void startDisrupting() {
+        if (nodesSideOne.isEmpty() || nodesSideTwo.isEmpty()) {
+ return;
+ }
+ logger.info("nodes {} will be partitioned from {}. partition type [{}]", nodesSideOne, nodesSideTwo, getPartitionDescription());
+ activeDisruption = true;
+ for (String node1 : nodesSideOne) {
+ MockTransportService transportService1 = (MockTransportService) cluster.getInstance(TransportService.class, node1);
+ for (String node2 : nodesSideTwo) {
+ MockTransportService transportService2 = (MockTransportService) cluster.getInstance(TransportService.class, node2);
+ applyDisruption(transportService1, transportService2);
+ }
+ }
+ }
+
+
+ @Override
+ public synchronized void stopDisrupting() {
+        if (nodesSideOne.isEmpty() || nodesSideTwo.isEmpty() || !activeDisruption) {
+ return;
+ }
+ logger.info("restoring partition between nodes {} & nodes {}", nodesSideOne, nodesSideTwo);
+ for (String node1 : nodesSideOne) {
+ MockTransportService transportService1 = (MockTransportService) cluster.getInstance(TransportService.class, node1);
+ for (String node2 : nodesSideTwo) {
+ MockTransportService transportService2 = (MockTransportService) cluster.getInstance(TransportService.class, node2);
+ removeDisruption(transportService1, transportService2);
+ }
+ }
+ activeDisruption = false;
+ }
+
+ abstract void applyDisruption(MockTransportService transportService1, MockTransportService transportService2);
+
+
+ protected void removeDisruption(MockTransportService transportService1, MockTransportService transportService2) {
+ transportService1.clearRule(transportService2);
+ transportService2.clearRule(transportService1);
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java
new file mode 100644
index 0000000000..b1ce97374a
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.io.IOException;
+import java.util.Collection;
+
+public class NetworkPartitionIT extends ESIntegTestCase {
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return pluginList(MockTransportService.TestPlugin.class);
+ }
+
+ public void testNetworkPartitionWithNodeShutdown() throws IOException {
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ String[] nodeNames = internalCluster().getNodeNames();
+ NetworkPartition networkPartition = new NetworkUnresponsivePartition(nodeNames[0], nodeNames[1], getRandom());
+ internalCluster().setDisruptionScheme(networkPartition);
+ networkPartition.startDisrupting();
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeNames[0]));
+ internalCluster().clearDisruptionScheme();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
new file mode 100644
index 0000000000..b69b7af3e5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.transport.MockTransportService;
+
+import java.util.Random;
+import java.util.Set;
+
+public class NetworkUnresponsivePartition extends NetworkPartition {
+
+ public NetworkUnresponsivePartition(Random random) {
+ super(random);
+ }
+
+ public NetworkUnresponsivePartition(String node1, String node2, Random random) {
+ super(node1, node2, random);
+ }
+
+ public NetworkUnresponsivePartition(Set<String> nodesSideOne, Set<String> nodesSideTwo, Random random) {
+ super(nodesSideOne, nodesSideTwo, random);
+ }
+
+ @Override
+ protected String getPartitionDescription() {
+ return "unresponsive";
+ }
+
+ @Override
+ void applyDisruption(MockTransportService transportService1, MockTransportService transportService2) {
+ transportService1.addUnresponsiveRule(transportService2);
+ transportService2.addUnresponsiveRule(transportService1);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueSeconds(0);
+ }
+}
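
A usage sketch for the set-based constructor above, which partitions arbitrary groups of nodes rather than just a pair. This assumes an ESIntegTestCase context like NetworkPartitionIT above; the node names are placeholders.

    // Sketch only: assumes internalCluster() and getRandom() from ESIntegTestCase,
    // java.util.{Set,HashSet,Arrays} imports, and that the named nodes exist.
    Set<String> sideOne = new HashSet<>(Arrays.asList("node_a"));
    Set<String> sideTwo = new HashSet<>(Arrays.asList("node_b", "node_c"));
    NetworkPartition partition = new NetworkUnresponsivePartition(sideOne, sideTwo, getRandom());
    internalCluster().setDisruptionScheme(partition); // applyToCluster(...) resolves the nodes
    partition.startDisrupting();                      // installs unresponsive rules on both sides
    // ... assert behavior while the two sides cannot talk to each other ...
    internalCluster().clearDisruptionScheme();        // heals the partition and removes the scheme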
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
new file mode 100644
index 0000000000..06bef2105e
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+public class NoOpDisruptionScheme implements ServiceDisruptionScheme {
+
+ @Override
+ public void applyToCluster(InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void removeFromCluster(InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void applyToNode(String node, InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void removeFromNode(String node, InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public void startDisrupting() {
+
+ }
+
+ @Override
+ public void stopDisrupting() {
+
+ }
+
+ @Override
+ public void testClusterClosed() {
+
+ }
+
+ @Override
+ public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueSeconds(0);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
new file mode 100644
index 0000000000..b5f3bcacbd
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+public interface ServiceDisruptionScheme {
+
+ void applyToCluster(InternalTestCluster cluster);
+
+ void removeFromCluster(InternalTestCluster cluster);
+
+ void removeAndEnsureHealthy(InternalTestCluster cluster);
+
+ void applyToNode(String node, InternalTestCluster cluster);
+
+ void removeFromNode(String node, InternalTestCluster cluster);
+
+ void startDisrupting();
+
+ void stopDisrupting();
+
+ void testClusterClosed();
+
+ TimeValue expectedTimeToHeal();
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
new file mode 100644
index 0000000000..f74280c014
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Random;
+
+import static org.junit.Assert.assertFalse;
+
+public abstract class SingleNodeDisruption implements ServiceDisruptionScheme {
+
+ protected final ESLogger logger = Loggers.getLogger(getClass());
+
+ protected volatile String disruptedNode;
+ protected volatile InternalTestCluster cluster;
+ protected final Random random;
+
+
+ public SingleNodeDisruption(String disruptedNode, Random random) {
+ this(random);
+ this.disruptedNode = disruptedNode;
+ }
+
+ public SingleNodeDisruption(Random random) {
+ this.random = new Random(random.nextLong());
+ }
+
+ @Override
+ public void applyToCluster(InternalTestCluster cluster) {
+ this.cluster = cluster;
+ if (disruptedNode == null) {
+ String[] nodes = cluster.getNodeNames();
+ disruptedNode = nodes[random.nextInt(nodes.length)];
+ }
+ }
+
+ @Override
+ public void removeFromCluster(InternalTestCluster cluster) {
+ if (disruptedNode != null) {
+ removeFromNode(disruptedNode, cluster);
+ }
+ }
+
+ @Override
+ public synchronized void applyToNode(String node, InternalTestCluster cluster) {
+
+ }
+
+ @Override
+ public synchronized void removeFromNode(String node, InternalTestCluster cluster) {
+ if (disruptedNode == null) {
+ return;
+ }
+ if (!node.equals(disruptedNode)) {
+ return;
+ }
+ stopDisrupting();
+ disruptedNode = null;
+ }
+
+ @Override
+ public synchronized void testClusterClosed() {
+ disruptedNode = null;
+ }
+
+ protected void ensureNodeCount(InternalTestCluster cluster) {
+ assertFalse("cluster failed to form after disruption was healed", cluster.client().admin().cluster().prepareHealth()
+ .setWaitForNodes("" + cluster.size())
+ .setWaitForRelocatingShards(0)
+ .get().isTimedOut());
+ }
+}
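
To make the remaining contract concrete, here is a minimal, hypothetical subclass; only the methods left abstract by SingleNodeDisruption and ServiceDisruptionScheme are filled in, the class name is illustrative, and imports are as in SingleNodeDisruption above.

    // Hypothetical subclass for illustration only; a real scheme would act on the node.
    class LoggingOnlyDisruption extends SingleNodeDisruption {

        LoggingOnlyDisruption(Random random) {
            super(random); // a node is picked at random once applyToCluster(...) runs
        }

        @Override
        public void startDisrupting() {
            logger.info("disrupting [{}]", disruptedNode); // real schemes disrupt the node here
        }

        @Override
        public void stopDisrupting() {
            logger.info("healing [{}]", disruptedNode);
        }

        @Override
        public void removeAndEnsureHealthy(InternalTestCluster cluster) {
            removeFromCluster(cluster);
            ensureNodeCount(cluster); // defined above: waits for the cluster to re-form
        }

        @Override
        public TimeValue expectedTimeToHeal() {
            return TimeValue.timeValueMillis(0);
        }
    }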
diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
new file mode 100644
index 0000000000..b9c663686b
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.disruption;
+
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateUpdateTask;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public class SlowClusterStateProcessing extends SingleNodeDisruption {
+
+ volatile boolean disrupting;
+ volatile Thread worker;
+
+ final long intervalBetweenDelaysMin;
+ final long intervalBetweenDelaysMax;
+ final long delayDurationMin;
+ final long delayDurationMax;
+
+
+ public SlowClusterStateProcessing(Random random) {
+ this(null, random);
+ }
+
+ public SlowClusterStateProcessing(String disruptedNode, Random random) {
+ this(disruptedNode, random, 100, 200, 300, 20000);
+ }
+
+ public SlowClusterStateProcessing(String disruptedNode, Random random, long intervalBetweenDelaysMin,
+ long intervalBetweenDelaysMax, long delayDurationMin, long delayDurationMax) {
+ this(random, intervalBetweenDelaysMin, intervalBetweenDelaysMax, delayDurationMin, delayDurationMax);
+ this.disruptedNode = disruptedNode;
+ }
+
+ public SlowClusterStateProcessing(Random random,
+ long intervalBetweenDelaysMin, long intervalBetweenDelaysMax, long delayDurationMin,
+ long delayDurationMax) {
+ super(random);
+ this.intervalBetweenDelaysMin = intervalBetweenDelaysMin;
+ this.intervalBetweenDelaysMax = intervalBetweenDelaysMax;
+ this.delayDurationMin = delayDurationMin;
+ this.delayDurationMax = delayDurationMax;
+ }
+
+
+ @Override
+ public void startDisrupting() {
+ disrupting = true;
+ worker = new Thread(new BackgroundWorker());
+ worker.setDaemon(true);
+ worker.start();
+ }
+
+ @Override
+ public void stopDisrupting() {
+ if (worker == null) {
+ return;
+ }
+        logger.info("stopping the cluster state processing slowdown on [{}]", disruptedNode);
+ disrupting = false;
+ worker.interrupt();
+ try {
+ worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax));
+ } catch (InterruptedException e) {
+ logger.info("background thread failed to stop");
+ }
+ worker = null;
+ }
+
+
+ private boolean interruptClusterStateProcessing(final TimeValue duration) throws InterruptedException {
+ final String disruptionNodeCopy = disruptedNode;
+ if (disruptionNodeCopy == null) {
+ return false;
+ }
+ logger.info("delaying cluster state updates on node [{}] for [{}]", disruptionNodeCopy, duration);
+ final CountDownLatch countDownLatch = new CountDownLatch(1);
+ ClusterService clusterService = cluster.getInstance(ClusterService.class, disruptionNodeCopy);
+ if (clusterService == null) {
+ return false;
+ }
+ final AtomicBoolean stopped = new AtomicBoolean(false);
+ clusterService.submitStateUpdateTask("service_disruption_delay", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
+
+ @Override
+ public boolean runOnlyOnMaster() {
+ return false;
+ }
+
+ @Override
+ public ClusterState execute(ClusterState currentState) throws Exception {
+ long count = duration.millis() / 200;
+                // wait in 200ms chunks, re-checking whether we have been stopped
+ for (; count > 0 && !stopped.get(); count--) {
+ Thread.sleep(200);
+ }
+ if (!stopped.get()) {
+ Thread.sleep(duration.millis() % 200);
+ }
+ countDownLatch.countDown();
+ return currentState;
+ }
+
+ @Override
+ public void onFailure(String source, Throwable t) {
+ countDownLatch.countDown();
+ }
+ });
+ try {
+ countDownLatch.await();
+ } catch (InterruptedException e) {
+ stopped.set(true);
+ // try to wait again, we really want the cluster state thread to be freed up when stopping disruption
+ countDownLatch.await();
+ }
+ return true;
+ }
+
+ @Override
+ public void removeAndEnsureHealthy(InternalTestCluster cluster) {
+ removeFromCluster(cluster);
+ ensureNodeCount(cluster);
+ }
+
+ @Override
+ public TimeValue expectedTimeToHeal() {
+ return TimeValue.timeValueMillis(0);
+ }
+
+ class BackgroundWorker implements Runnable {
+
+ @Override
+ public void run() {
+ while (disrupting && disruptedNode != null) {
+ try {
+ TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin)));
+ if (!interruptClusterStateProcessing(duration)) {
+ continue;
+ }
+ if (intervalBetweenDelaysMax > 0) {
+ duration = new TimeValue(intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)));
+ if (disrupting && disruptedNode != null) {
+ Thread.sleep(duration.millis());
+ }
+ }
+                } catch (InterruptedException e) {
+                    // interrupted, most likely by stopDisrupting(); fall through and re-check the disrupting flag
+ } catch (Exception e) {
+ logger.error("error in background worker", e);
+ }
+ }
+ }
+ }
+
+}
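
A usage sketch, again assuming an ESIntegTestCase context; the bounds are milliseconds and the values here are illustrative.

    // Delay cluster state application on a random node for 300-1000ms at a time,
    // pausing 100-500ms between delays (constructor order: interval min/max, delay min/max).
    SlowClusterStateProcessing disruption =
            new SlowClusterStateProcessing(getRandom(), 100, 500, 300, 1000);
    internalCluster().setDisruptionScheme(disruption);
    disruption.startDisrupting();
    // ... exercise code paths that are sensitive to slow cluster state processing ...
    disruption.stopDisrupting(); // joins the background worker
    internalCluster().clearDisruptionScheme();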
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
new file mode 100644
index 0000000000..0187d4ac03
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.search.IndexSearcher;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.shard.ShardId;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * A searcher that asserts the IndexReader's refcount on close
+ */
+class AssertingSearcher extends Engine.Searcher {
+ private final Engine.Searcher wrappedSearcher;
+ private final ShardId shardId;
+ private RuntimeException firstReleaseStack;
+ private final Object lock = new Object();
+ private final int initialRefCount;
+ private final ESLogger logger;
+ private final AtomicBoolean closed = new AtomicBoolean(false);
+
+ AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher,
+ ShardId shardId,
+ ESLogger logger) {
+ super(wrappedSearcher.source(), indexSearcher);
+        // we only use the given index searcher here instead of the one of the wrapped searcher; the latter
+        // might be a wrapped searcher with a wrapped reader.
+ this.wrappedSearcher = wrappedSearcher;
+ this.logger = logger;
+ this.shardId = shardId;
+ initialRefCount = wrappedSearcher.reader().getRefCount();
+ assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed";
+ }
+
+ @Override
+ public String source() {
+ return wrappedSearcher.source();
+ }
+
+ @Override
+ public void close() {
+ synchronized (lock) {
+ if (closed.compareAndSet(false, true)) {
+ firstReleaseStack = new RuntimeException();
+ final int refCount = wrappedSearcher.reader().getRefCount();
+                // this assert may seem paranoid, but given LUCENE-5362 we are better off asserting here
+                // to make sure we catch any potential problems.
+ assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]";
+ try {
+ wrappedSearcher.close();
+ } catch (RuntimeException ex) {
+ logger.debug("Failed to release searcher", ex);
+ throw ex;
+ }
+ } else {
+ AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]");
+ error.initCause(firstReleaseStack);
+ throw error;
+ }
+ }
+ }
+
+ public ShardId shardId() {
+ return shardId;
+ }
+
+ public boolean isOpen() {
+ return closed.get() == false;
+ }
+}
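
The double-release guard above turns a second close() into a hard failure that carries the first release's stack trace. A failure-mode sketch, assuming a searcher acquired via Engine#acquireSearcher(String) from one of the mock engines below:

    Engine.Searcher searcher = engine.acquireSearcher("test"); // an AssertingSearcher under the hood
    try {
        // ... use searcher.searcher() and searcher.reader() ...
    } finally {
        searcher.close(); // first close: releases the wrapped searcher
    }
    searcher.close(); // second close: AssertionError("Released Searcher more than once, ...")
                      // with the stack trace of the first release attached as the cause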
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java
new file mode 100644
index 0000000000..de51670f57
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineFactory;
+
+public final class MockEngineFactory implements EngineFactory {
+
+ private final Class<? extends FilterDirectoryReader> wrapper;
+
+ public MockEngineFactory(Class<? extends FilterDirectoryReader> wrapper) {
+ this.wrapper = wrapper;
+ }
+
+ @Override
+ public Engine newReadWriteEngine(EngineConfig config, boolean skipTranslogRecovery) {
+ return new MockInternalEngine(config, skipTranslogRecovery, wrapper);
+ }
+
+ @Override
+ public Engine newReadOnlyEngine(EngineConfig config) {
+ return new MockShadowEngine(config, wrapper);
+ }
+}
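
A wiring sketch: the factory only threads a FilterDirectoryReader subclass through to the mock engines. This assumes Lucene's test-framework AssertingDirectoryReader as the wrapper and an EngineConfig named config already at hand.

    // Sketch: engines built by this factory wrap their readers with the given class.
    EngineFactory factory = new MockEngineFactory(AssertingDirectoryReader.class);
    Engine engine = factory.newReadWriteEngine(config, false /* skipTranslogRecovery */);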
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
new file mode 100644
index 0000000000..70dfa6847b
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.*;
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.util.IdentityHashMap;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine} or {@link org.elasticsearch.test.engine.MockShadowEngine}
+ * since they need to subclass the actual engine
+ */
+public final class MockEngineSupport {
+
+ public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio";
+ public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper";
+ public static final String FLUSH_ON_CLOSE_RATIO = "index.engine.mock.flush_on_close.ratio";
+
+ private final AtomicBoolean closing = new AtomicBoolean(false);
+ private final ESLogger logger = Loggers.getLogger(Engine.class);
+ private final ShardId shardId;
+ private final QueryCache filterCache;
+ private final QueryCachingPolicy filterCachingPolicy;
+ private final SearcherCloseable searcherCloseable;
+ private final MockContext mockContext;
+
+ public static class MockContext {
+ private final Random random;
+ private final boolean wrapReader;
+ private final Class<? extends FilterDirectoryReader> wrapper;
+ private final Settings indexSettings;
+ private final double flushOnClose;
+
+ public MockContext(Random random, boolean wrapReader, Class<? extends FilterDirectoryReader> wrapper, Settings indexSettings) {
+ this.random = random;
+ this.wrapReader = wrapReader;
+ this.wrapper = wrapper;
+ this.indexSettings = indexSettings;
+ flushOnClose = indexSettings.getAsDouble(FLUSH_ON_CLOSE_RATIO, 0.5d);
+ }
+ }
+
+ public MockEngineSupport(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) {
+ Settings settings = config.getIndexSettings().getSettings();
+ shardId = config.getShardId();
+ filterCache = config.getQueryCache();
+ filterCachingPolicy = config.getQueryCachingPolicy();
+        final long seed = settings.getAsLong(ESIntegTestCase.SETTING_INDEX_SEED, 0L);
+ Random random = new Random(seed);
+ final double ratio = settings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow
+ boolean wrapReader = random.nextDouble() < ratio;
+ if (logger.isTraceEnabled()) {
+ logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader);
+ }
+ mockContext = new MockContext(random, wrapReader, wrapper, settings);
+ this.searcherCloseable = new SearcherCloseable();
+ LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine
+ }
+
+ enum CloseAction {
+ FLUSH_AND_CLOSE,
+ CLOSE;
+ }
+
+
+ /**
+ * Returns the CloseAction to execute on the actual engine. Note this method changes the state on
+ * the first call and treats subsequent calls as if the engine passed is already closed.
+ */
+ public CloseAction flushOrClose(Engine engine, CloseAction originalAction) throws IOException {
+        if (closing.compareAndSet(false, true)) { // only randomize on the first call; super.flushAndClose() calls #close() again and we could otherwise end up with a stack overflow.
+ if (mockContext.flushOnClose > mockContext.random.nextDouble()) {
+ return CloseAction.FLUSH_AND_CLOSE;
+ } else {
+ return CloseAction.CLOSE;
+ }
+ } else {
+ return originalAction;
+ }
+ }
+
+ public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
+ IndexReader reader = searcher.getIndexReader();
+ IndexReader wrappedReader = reader;
+ assert reader != null;
+ if (reader instanceof DirectoryReader && mockContext.wrapReader) {
+ wrappedReader = wrapReader((DirectoryReader) reader);
+ }
+ // this executes basic query checks and asserts that weights are normalized only once etc.
+ final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader);
+ assertingIndexSearcher.setSimilarity(searcher.getSimilarity(true));
+ assertingIndexSearcher.setQueryCache(filterCache);
+ assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy);
+ return assertingIndexSearcher;
+ }
+
+ private DirectoryReader wrapReader(DirectoryReader reader) {
+ try {
+ Constructor<?>[] constructors = mockContext.wrapper.getConstructors();
+ Constructor<?> nonRandom = null;
+ for (Constructor<?> constructor : constructors) {
+ Class<?>[] parameterTypes = constructor.getParameterTypes();
+ if (parameterTypes.length > 0 && parameterTypes[0] == DirectoryReader.class) {
+ if (parameterTypes.length == 1) {
+ nonRandom = constructor;
+                    } else if (parameterTypes.length == 2 && parameterTypes[1] == Settings.class) {
+                        return (DirectoryReader) constructor.newInstance(reader, mockContext.indexSettings);
+ }
+ }
+ }
+ if (nonRandom != null) {
+ return (DirectoryReader) nonRandom.newInstance(reader);
+ }
+ } catch (Exception e) {
+ throw new ElasticsearchException("Can not wrap reader", e);
+ }
+ return reader;
+ }
+
+ public static abstract class DirectoryReaderWrapper extends FilterDirectoryReader {
+ protected final SubReaderWrapper subReaderWrapper;
+
+ public DirectoryReaderWrapper(DirectoryReader in, SubReaderWrapper subReaderWrapper) throws IOException {
+ super(in, subReaderWrapper);
+ this.subReaderWrapper = subReaderWrapper;
+ }
+
+ @Override
+ public Object getCoreCacheKey() {
+ return in.getCoreCacheKey();
+ }
+
+ }
+
+ public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher, IndexSearcher searcher, SearcherManager manager) {
+ final AssertingIndexSearcher assertingIndexSearcher = newSearcher(source, searcher, manager);
+ assertingIndexSearcher.setSimilarity(searcher.getSimilarity(true));
+        // pass the original searcher to the super.newSearcher() method to make sure it is the searcher that will
+        // be released later on. If we wrap an index reader here we must not pass the wrapped version to the manager
+        // on release, otherwise the reader will be closed too early - the good news is that things fail all over the place if we get this wrong here
+ AssertingSearcher assertingSearcher = new AssertingSearcher(assertingIndexSearcher, engineSearcher, shardId, logger) {
+ @Override
+ public void close() {
+ try {
+ searcherCloseable.remove(this);
+ } finally {
+ super.close();
+ }
+ }
+ };
+ searcherCloseable.add(assertingSearcher, engineSearcher.source());
+ return assertingSearcher;
+ }
+
+ private static final class SearcherCloseable implements Closeable {
+
+ private final IdentityHashMap<AssertingSearcher, RuntimeException> openSearchers = new IdentityHashMap<>();
+
+ @Override
+ public synchronized void close() throws IOException {
+ if (openSearchers.isEmpty() == false) {
+ AssertionError error = new AssertionError("Unreleased searchers found");
+ for (RuntimeException ex : openSearchers.values()) {
+ error.addSuppressed(ex);
+ }
+ throw error;
+ }
+ }
+
+ void add(AssertingSearcher searcher, String source) {
+            final RuntimeException ex = new RuntimeException("Unreleased Searcher, source [" + source + "]");
+ synchronized (this) {
+ openSearchers.put(searcher, ex);
+ }
+ }
+
+ synchronized void remove(AssertingSearcher searcher) {
+ openSearchers.remove(searcher);
+ }
+ }
+}
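
A configuration sketch for the three settings above; the keys come from the constants, the values are illustrative, and Settings.settingsBuilder() is assumed as this codebase's builder entry point.

    // Illustrative values: wrap 10% of readers (default is 0.0 because
    // AssertingDirectoryReader is slow) and flush on close half of the time.
    Settings indexSettings = Settings.settingsBuilder()
            .put(MockEngineSupport.WRAP_READER_RATIO, "0.1")
            .put(MockEngineSupport.FLUSH_ON_CLOSE_RATIO, "0.5")
            .put(ESIntegTestCase.SETTING_INDEX_SEED, "42") // seeds the per-engine Random
            .build();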
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java
new file mode 100644
index 0000000000..15bb291868
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.SearcherManager;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.InternalEngine;
+
+import java.io.IOException;
+
+final class MockInternalEngine extends InternalEngine {
+ private MockEngineSupport support;
+ private final boolean randomizeFlushOnClose;
+ private Class<? extends FilterDirectoryReader> wrapperClass;
+
+ MockInternalEngine(EngineConfig config, boolean skipInitialTranslogRecovery, Class<? extends FilterDirectoryReader> wrapper) throws EngineException {
+ super(config, skipInitialTranslogRecovery);
+ randomizeFlushOnClose = config.getIndexSettings().isOnSharedFilesystem() == false;
+ wrapperClass = wrapper;
+    }
+
+ private synchronized MockEngineSupport support() {
+ // lazy initialized since we need it already on super() ctor execution :(
+ if (support == null) {
+ support = new MockEngineSupport(config(), wrapperClass);
+ }
+ return support;
+ }
+
+ @Override
+ public void close() throws IOException {
+ switch (support().flushOrClose(this, MockEngineSupport.CloseAction.CLOSE)) {
+ case FLUSH_AND_CLOSE:
+ super.flushAndClose();
+ break;
+ case CLOSE:
+ super.close();
+ break;
+ }
+ }
+
+ @Override
+ public void flushAndClose() throws IOException {
+ if (randomizeFlushOnClose) {
+ switch (support().flushOrClose(this, MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) {
+ case FLUSH_AND_CLOSE:
+ super.flushAndClose();
+ break;
+ case CLOSE:
+ super.close();
+ break;
+ }
+ } else {
+ super.flushAndClose();
+ }
+ }
+
+ @Override
+ protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
+ final Searcher engineSearcher = super.newSearcher(source, searcher, manager);
+ return support().wrapSearcher(source, engineSearcher, searcher, manager);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java
new file mode 100644
index 0000000000..f05f69bf27
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.SearcherManager;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.ShadowEngine;
+
+final class MockShadowEngine extends ShadowEngine {
+ private final MockEngineSupport support;
+
+ MockShadowEngine(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) {
+ super(config);
+ this.support = new MockEngineSupport(config, wrapper);
+ }
+
+ @Override
+ protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException {
+ final Searcher engineSearcher = super.newSearcher(source, searcher, manager);
+ return support.wrapSearcher(source, engineSearcher, searcher, manager);
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
new file mode 100644
index 0000000000..422b9375a1
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
@@ -0,0 +1,190 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.engine;
+
+import org.apache.lucene.index.*;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+
+import java.io.IOException;
+
+/**
+ * A FilterLeafReader that can throw exceptions when certain methods
+ * are called on it. This makes it possible to test parts of the system under
+ * error conditions that would otherwise be hard to reproduce.
+ */
+public class ThrowingLeafReaderWrapper extends FilterLeafReader {
+
+ private final Thrower thrower;
+
+ /**
+ * Flags passed to {@link Thrower#maybeThrow(org.elasticsearch.test.engine.ThrowingLeafReaderWrapper.Flags)}
+ * when the corresponding method is called.
+ */
+ public enum Flags {
+ TermVectors,
+ Terms,
+ TermsEnum,
+ Intersect,
+ DocsEnum,
+ DocsAndPositionsEnum,
+ Fields,
+ Norms, NumericDocValues, BinaryDocValues, SortedDocValues, SortedSetDocValues;
+ }
+
+ /**
+     * A callback interface that allows throwing exceptions for
+     * methods called on the IndexReader wrapped by {@link ThrowingLeafReaderWrapper}
+     */
+    public interface Thrower {
+        /**
+         * Maybe throws an exception for the given flag.
+         */
+        void maybeThrow(Flags flag) throws IOException;
+
+        /**
+         * If this method returns true, the {@link Terms} instance for the given field
+         * is wrapped with Thrower support; otherwise no exception will be thrown for
+         * the current {@link Terms} instance or any other instance obtained from it.
+         */
+        boolean wrapTerms(String field);
+ }
+
+ public ThrowingLeafReaderWrapper(LeafReader in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+
+ @Override
+ public Fields fields() throws IOException {
+ Fields fields = super.fields();
+ thrower.maybeThrow(Flags.Fields);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ @Override
+ public Fields getTermVectors(int docID) throws IOException {
+ Fields fields = super.getTermVectors(docID);
+ thrower.maybeThrow(Flags.TermVectors);
+ return fields == null ? null : new ThrowingFields(fields, thrower);
+ }
+
+ /**
+     * Wraps a Fields instance, adding Thrower checks
+ */
+ public static class ThrowingFields extends FilterFields {
+ private final Thrower thrower;
+
+ public ThrowingFields(Fields in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ Terms terms = super.terms(field);
+ if (thrower.wrapTerms(field)) {
+ thrower.maybeThrow(Flags.Terms);
+ return terms == null ? null : new ThrowingTerms(terms, thrower);
+ }
+ return terms;
+ }
+ }
+
+ /**
+     * Wraps a Terms instance, adding Thrower checks
+ */
+ public static class ThrowingTerms extends FilterTerms {
+ private final Thrower thrower;
+
+ public ThrowingTerms(Terms in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+ }
+
+ @Override
+ public TermsEnum intersect(CompiledAutomaton automaton, BytesRef bytes) throws IOException {
+ TermsEnum termsEnum = in.intersect(automaton, bytes);
+ thrower.maybeThrow(Flags.Intersect);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+
+ @Override
+ public TermsEnum iterator() throws IOException {
+ TermsEnum termsEnum = super.iterator();
+ thrower.maybeThrow(Flags.TermsEnum);
+ return new ThrowingTermsEnum(termsEnum, thrower);
+ }
+ }
+
+ static class ThrowingTermsEnum extends FilterTermsEnum {
+ private final Thrower thrower;
+
+ public ThrowingTermsEnum(TermsEnum in, Thrower thrower) {
+ super(in);
+ this.thrower = thrower;
+        }
+
+ @Override
+ public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
+ if ((flags & PostingsEnum.POSITIONS) != 0) {
+ thrower.maybeThrow(Flags.DocsAndPositionsEnum);
+ } else {
+ thrower.maybeThrow(Flags.DocsEnum);
+ }
+ return super.postings(reuse, flags);
+ }
+ }
+
+
+ @Override
+ public NumericDocValues getNumericDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.NumericDocValues);
+ return super.getNumericDocValues(field);
+
+ }
+
+ @Override
+ public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.BinaryDocValues);
+ return super.getBinaryDocValues(field);
+ }
+
+ @Override
+ public SortedDocValues getSortedDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedDocValues);
+ return super.getSortedDocValues(field);
+ }
+
+ @Override
+ public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.SortedSetDocValues);
+ return super.getSortedSetDocValues(field);
+ }
+
+ @Override
+ public NumericDocValues getNormValues(String field) throws IOException {
+ thrower.maybeThrow(Flags.Norms);
+ return super.getNormValues(field);
+ }
+}
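
A minimal Thrower sketch: fail a small fraction of Terms accesses and leave everything else alone. The failure rate, seed, and field filter are illustrative, and leafReader stands for any LeafReader to wrap (java.io.IOException and java.util.Random imports assumed).

    // Illustrative Thrower: fails roughly 1% of Terms lookups on ordinary fields.
    ThrowingLeafReaderWrapper.Thrower thrower = new ThrowingLeafReaderWrapper.Thrower() {
        private final Random random = new Random(42);

        @Override
        public void maybeThrow(ThrowingLeafReaderWrapper.Flags flag) throws IOException {
            if (flag == ThrowingLeafReaderWrapper.Flags.Terms && random.nextInt(100) == 0) {
                throw new IOException("simulated I/O failure"); // surfaces to the caller of terms(...)
            }
        }

        @Override
        public boolean wrapTerms(String field) {
            return field.startsWith("_") == false; // leave internal fields alone, for example
        }
    };
    LeafReader wrapped = new ThrowingLeafReaderWrapper(leafReader, thrower);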
diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
new file mode 100644
index 0000000000..825b203022
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.gateway;
+
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.GatewayAllocator;
+
+/**
+ * An allocator used for tests that doesn't do anything
+ */
+public class NoopGatewayAllocator extends GatewayAllocator {
+
+ public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();
+
+ protected NoopGatewayAllocator() {
+ super(Settings.EMPTY, null, null);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ // noop
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ // noop
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ return false;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
new file mode 100644
index 0000000000..b21e94d30a
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Matcher;
+
+/**
+ * Assertions for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionAssertions {
+
+ public static Matcher<ImmutableOpenMap> hasKey(final String key) {
+ return new CollectionMatchers.ImmutableOpenMapHasKeyMatcher(key);
+ }
+}
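
A usage sketch, hamcrest style; aliases stands for any ImmutableOpenMap, for example one read from cluster state metadata, and org.hamcrest.MatcherAssert.assertThat is assumed to be statically imported. Calling through the class rather than static-importing hasKey avoids a clash with org.hamcrest.Matchers.hasKey, which targets java.util.Map.

    // Sketch: aliases is assumed to be an ImmutableOpenMap.
    assertThat(aliases, CollectionAssertions.hasKey("my-alias"));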
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
new file mode 100644
index 0000000000..521ba58b0e
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+/**
+ * Matchers for easier handling of our custom collections,
+ * for example ImmutableOpenMap
+ */
+public class CollectionMatchers {
+
+ public static class ImmutableOpenMapHasKeyMatcher extends TypeSafeMatcher<ImmutableOpenMap> {
+
+ private final String key;
+
+ public ImmutableOpenMapHasKeyMatcher(String key) {
+ this.key = key;
+ }
+
+ @Override
+ protected boolean matchesSafely(ImmutableOpenMap item) {
+ return item.containsKey(key);
+ }
+
+ @Override
+ public void describeMismatchSafely(final ImmutableOpenMap map, final Description mismatchDescription) {
+ if (map.size() == 0) {
+ mismatchDescription.appendText("was empty");
+ } else {
+ mismatchDescription.appendText(" was ").appendValue(map);
+ }
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("ImmutableOpenMap should contain key " + key);
+ }
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
new file mode 100644
index 0000000000..9d8ad7f7dc
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -0,0 +1,831 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Query;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
+import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
+import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.percolate.PercolateResponse;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.plugins.PluginInfo;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.hamcrest.CoreMatchers;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static org.apache.lucene.util.LuceneTestCase.random;
+import static org.elasticsearch.test.VersionUtils.randomVersion;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Assertion helpers for requests executed and responses returned in Elasticsearch tests.
+ */
+public class ElasticsearchAssertions {
+
+ public static void assertAcked(AcknowledgedRequestBuilder<?, ?, ?> builder) {
+ assertAcked(builder.get());
+ }
+
+ public static void assertNoTimeout(ClusterHealthRequestBuilder requestBuilder) {
+ assertNoTimeout(requestBuilder.get());
+ }
+
+ public static void assertNoTimeout(ClusterHealthResponse response) {
+ assertThat("ClusterHealthResponse has timed out - returned: [" + response + "]", response.isTimedOut(), is(false));
+ }
+
+ public static void assertAcked(AcknowledgedResponse response) {
+ assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAcked(DeleteIndexRequestBuilder builder) {
+ assertAcked(builder.get());
+ }
+
+ public static void assertAcked(DeleteIndexResponse response) {
+ assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true));
+ assertVersionSerializable(response);
+ }
+
+ /**
+ * Executes the request and fails if the request has not been blocked.
+ *
+ * @param builder the request builder
+ */
+ public static void assertBlocked(ActionRequestBuilder builder) {
+ assertBlocked(builder, null);
+ }
+
+ /**
+     * Checks that all shard requests of a replicated broadcast request failed due to a cluster block
+     *
+     * @param replicatedBroadcastResponse the response that should only contain failed shard responses
+     */
+ public static void assertBlocked(BroadcastResponse replicatedBroadcastResponse) {
+ assertThat("all shard requests should have failed", replicatedBroadcastResponse.getFailedShards(), Matchers.equalTo(replicatedBroadcastResponse.getTotalShards()));
+ for (ShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) {
+ ClusterBlockException clusterBlockException = (ClusterBlockException) ExceptionsHelper.unwrap(exception.getCause(), ClusterBlockException.class);
+ assertNotNull("expected the cause of failure to be a ClusterBlockException but got " + exception.getCause().getMessage(), clusterBlockException);
+ assertThat(clusterBlockException.blocks().size(), greaterThan(0));
+ assertThat(clusterBlockException.status(), CoreMatchers.equalTo(RestStatus.FORBIDDEN));
+ }
+ }
+
+ /**
+ * Executes the request and fails if the request has not been blocked by a specific {@link ClusterBlock}.
+ *
+ * @param builder the request builder
+ * @param expectedBlock the expected block
+ */
+ public static void assertBlocked(ActionRequestBuilder builder, ClusterBlock expectedBlock) {
+ try {
+ builder.get();
+ fail("Request executed with success but a ClusterBlockException was expected");
+ } catch (ClusterBlockException e) {
+ assertThat(e.blocks().size(), greaterThan(0));
+ assertThat(e.status(), equalTo(RestStatus.FORBIDDEN));
+
+ if (expectedBlock != null) {
+ boolean found = false;
+ for (ClusterBlock clusterBlock : e.blocks()) {
+ if (clusterBlock.id() == expectedBlock.id()) {
+ found = true;
+ break;
+ }
+ }
+ assertThat("Request should have been blocked by [" + expectedBlock + "] instead of " + e.blocks(), found, equalTo(true));
+ }
+ }
+ }
+
+ public static String formatShardStatus(BroadcastResponse response) {
+ String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
+ + response.getFailedShards() + " shard failures:";
+ for (ShardOperationFailedException failure : response.getShardFailures()) {
+ msg += "\n " + failure.toString();
+ }
+ return msg;
+ }
+
+ public static String formatShardStatus(SearchResponse response) {
+ String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
+ + response.getFailedShards() + " shard failures:";
+ for (ShardSearchFailure failure : response.getShardFailures()) {
+ msg += "\n " + failure.toString();
+ }
+ return msg;
+ }
+
+ public static void assertNoSearchHits(SearchResponse searchResponse) {
+ assertEquals(0, searchResponse.getHits().getHits().length);
+ }
+
+ public static void assertSearchHits(SearchResponse searchResponse, String... ids) {
+ String shardStatus = formatShardStatus(searchResponse);
+
+ Set<String> idsSet = new HashSet<>(Arrays.asList(ids));
+ for (SearchHit hit : searchResponse.getHits()) {
+ assertThat("id [" + hit.getId() + "] was found in search results but wasn't expected (type [" + hit.getType() + "], index [" + hit.index() + "])"
+ + shardStatus, idsSet.remove(hit.getId()),
+ equalTo(true));
+ }
+ assertThat("Some expected ids were not found in search results: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + "."
+ + shardStatus, idsSet.size(), equalTo(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertSortValues(SearchResponse searchResponse, Object[]... sortValues) {
+ assertSearchResponse(searchResponse);
+ SearchHit[] hits = searchResponse.getHits().getHits();
+ assertEquals(sortValues.length, hits.length);
+ for (int i = 0; i < sortValues.length; ++i) {
+ final Object[] hitsSortValues = hits[i].getSortValues();
+ assertArrayEquals("Offset " + Integer.toString(i) + ", id " + hits[i].getId(), sortValues[i], hitsSortValues);
+ }
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) {
+ String shardStatus = formatShardStatus(searchResponse);
+ assertThat("Expected different hit count. " + shardStatus, searchResponse.getHits().hits().length, equalTo(ids.length));
+ for (int i = 0; i < ids.length; i++) {
+ SearchHit hit = searchResponse.getHits().hits()[i];
+ assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i]));
+ }
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertHitCount(SearchResponse countResponse, long expectedHitCount) {
+ if (countResponse.getHits().totalHits() != expectedHitCount) {
+ fail("Count is " + countResponse.getHits().totalHits() + " but " + expectedHitCount + " was expected. " + formatShardStatus(countResponse));
+ }
+ assertVersionSerializable(countResponse);
+ }
+
+ public static void assertMatchCount(PercolateResponse percolateResponse, long expectedHitCount) {
+ if (percolateResponse.getCount() != expectedHitCount) {
+ fail("Count is " + percolateResponse.getCount() + " but " + expectedHitCount + " was expected. " + formatShardStatus(percolateResponse));
+ }
+ assertVersionSerializable(percolateResponse);
+ }
+
+ public static void assertExists(GetResponse response) {
+ String message = String.format(Locale.ROOT, "Expected %s/%s/%s to exist, but does not", response.getIndex(), response.getType(), response.getId());
+ assertThat(message, response.isExists(), is(true));
+ }
+
+ public static void assertFirstHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 1, matcher);
+ }
+
+ public static void assertSecondHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 2, matcher);
+ }
+
+ public static void assertThirdHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 3, matcher);
+ }
+
+ public static void assertFourthHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 4, matcher);
+ }
+
+ public static void assertFifthHit(SearchResponse searchResponse, Matcher<SearchHit> matcher) {
+ assertSearchHit(searchResponse, 5, matcher);
+ }
+
+ public static void assertSearchHit(SearchResponse searchResponse, int number, Matcher<SearchHit> matcher) {
+ assertThat("SearchHit number must be greater than 0", number, greaterThan(0));
+ assertThat(searchResponse.getHits().totalHits(), greaterThanOrEqualTo((long) number));
+ assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher);
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertNoFailures(SearchResponse searchResponse) {
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()),
+ searchResponse.getShardFailures().length, equalTo(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertFailures(SearchResponse searchResponse) {
+ assertThat("Expected at least one shard failure, got none",
+ searchResponse.getShardFailures().length, greaterThan(0));
+ assertVersionSerializable(searchResponse);
+ }
+
+ public static void assertNoFailures(BulkResponse response) {
+ assertThat("Unexpected ShardFailures: " + response.buildFailureMessage(),
+ response.hasFailures(), is(false));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus, Matcher<String> reasonMatcher) {
+ // when the number of shards is randomized and we expect failures
+ // we can run into either partial or total failures depending on the current number of shards
+ try {
+ SearchResponse searchResponse = searchRequestBuilder.get();
+ assertThat("Expected shard failures, got none", searchResponse.getShardFailures().length, greaterThan(0));
+ for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
+ assertThat(shardSearchFailure.status(), equalTo(restStatus));
+ assertThat(shardSearchFailure.reason(), reasonMatcher);
+ }
+ assertVersionSerializable(searchResponse);
+ } catch (SearchPhaseExecutionException e) {
+ assertThat(e.status(), equalTo(restStatus));
+ assertThat(e.toString(), reasonMatcher);
+ for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
+ assertThat(shardSearchFailure.status(), equalTo(restStatus));
+ assertThat(shardSearchFailure.reason(), reasonMatcher);
+ }
+ } catch (Exception e) {
+ fail("SearchPhaseExecutionException expected but got " + e.getClass());
+ }
+ }
+
+ public static void assertFailures(PercolateResponse percolateResponse) {
+ assertThat("Expected at least one shard failure, got none",
+ percolateResponse.getShardFailures().length, greaterThan(0));
+ assertVersionSerializable(percolateResponse);
+ }
+
+ public static void assertNoFailures(BroadcastResponse response) {
+ assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAllSuccessful(BroadcastResponse response) {
+ assertNoFailures(response);
+ assertThat("Expected all shards successful but got successful [" + response.getSuccessfulShards() + "] total [" + response.getTotalShards() + "]",
+ response.getTotalShards(), equalTo(response.getSuccessfulShards()));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertAllSuccessful(SearchResponse response) {
+ assertNoFailures(response);
+ assertThat("Expected all shards successful but got successful [" + response.getSuccessfulShards() + "] total [" + response.getTotalShards() + "]",
+ response.getTotalShards(), equalTo(response.getSuccessfulShards()));
+ assertVersionSerializable(response);
+ }
+
+ public static void assertSearchHit(SearchHit searchHit, Matcher<SearchHit> matcher) {
+ assertThat(searchHit, matcher);
+ assertVersionSerializable(searchHit);
+ }
+
+ public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<String> matcher) {
+ assertHighlight(resp, hit, field, fragment, greaterThan(fragment), matcher);
+ }
+
+ public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+ assertHighlight(resp, hit, field, fragment, equalTo(totalFragments), matcher);
+ }
+
+ public static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<String> matcher) {
+ assertHighlight(hit, field, fragment, greaterThan(fragment), matcher);
+ }
+
+ public static void assertHighlight(SearchHit hit, String field, int fragment, int totalFragments, Matcher<String> matcher) {
+ assertHighlight(hit, field, fragment, equalTo(totalFragments), matcher);
+ }
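+ // Illustrative usage of the assertHighlight variants above (field name and matcher are assumptions):
+ //   assertHighlight(resp, 0, "title", 0, equalTo("The <em>quick</em> fox"));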
+
+ private static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+ assertNoFailures(resp);
+ assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+ assertHighlight(resp.getHits().hits()[hit], field, fragment, fragmentsMatcher, matcher);
+ assertVersionSerializable(resp);
+ }
+
+ private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher<Integer> fragmentsMatcher, Matcher<String> matcher) {
+ assertThat(hit.getHighlightFields(), hasKey(field));
+ assertThat(hit.getHighlightFields().get(field).fragments().length, fragmentsMatcher);
+ assertThat(hit.highlightFields().get(field).fragments()[fragment].string(), matcher);
+ }
+
+ public static void assertNotHighlighted(SearchResponse resp, int hit, String field) {
+ assertNoFailures(resp);
+ assertThat("not enough hits", resp.getHits().hits().length, greaterThan(hit));
+ assertThat(resp.getHits().hits()[hit].getHighlightFields(), not(hasKey(field)));
+ }
+
+ public static void assertSuggestionSize(Suggest searchSuggest, int entry, int size, String key) {
+ assertThat(searchSuggest, notNullValue());
+ String msg = "Suggest result: " + searchSuggest.toString();
+ assertThat(msg, searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key));
+ assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+ assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size));
+ assertVersionSerializable(searchSuggest);
+ }
+
+ public static void assertSuggestionPhraseCollateMatchExists(Suggest searchSuggest, String key, int numberOfPhraseExists) {
+ int counter = 0;
+ assertThat(searchSuggest, notNullValue());
+ String msg = "Suggest result: " + searchSuggest.toString();
+ assertThat(msg, searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key));
+
+ for (Suggest.Suggestion.Entry.Option option : searchSuggest.getSuggestion(key).getEntries().get(0).getOptions()) {
+ if (option.collateMatch()) {
+ counter++;
+ }
+ }
+
+ assertThat(counter, equalTo(numberOfPhraseExists));
+ }
+
+ public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, String key, String text) {
+ assertThat(searchSuggest, notNullValue());
+ String msg = "Suggest result: " + searchSuggest.toString();
+ assertThat(msg, searchSuggest.size(), greaterThanOrEqualTo(1));
+ assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key));
+ assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry));
+ assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord));
+ assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text));
+ assertVersionSerializable(searchSuggest);
+ }
+
+ /**
+ * Assert that the suggestion returns exactly the provided text.
+ */
+ public static void assertSuggestion(Suggest searchSuggest, int entry, String key, String... text) {
+ assertSuggestion(searchSuggest, entry, key, text.length, text);
+ }
+
+ /**
+ * Assert that the suggestion returns {@code size} options and that the first
+ * options match the provided text.
+ */
+ public static void assertSuggestion(Suggest searchSuggest, int entry, String key, int size, String... text) {
+ assertSuggestionSize(searchSuggest, entry, size, key);
+ for (int i = 0; i < text.length; i++) {
+ assertSuggestion(searchSuggest, entry, i, key, text[i]);
+ }
+ }
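+ // Illustrative usage of the assertSuggestion variants above (suggestion name and texts are assumptions):
+ //   assertSuggestion(suggest, 0, "my-suggest", "foo", "bar");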
+
+ /**
+ * Assert that an index template is missing
+ */
+ public static void assertIndexTemplateMissing(GetIndexTemplatesResponse templatesResponse, String name) {
+ List<String> templateNames = new ArrayList<>();
+ for (IndexTemplateMetaData indexTemplateMetaData : templatesResponse.getIndexTemplates()) {
+ templateNames.add(indexTemplateMetaData.name());
+ }
+ assertThat(templateNames, not(hasItem(name)));
+ }
+
+ /**
+ * Assert that an index template exists
+ */
+ public static void assertIndexTemplateExists(GetIndexTemplatesResponse templatesResponse, String name) {
+ List<String> templateNames = new ArrayList<>();
+ for (IndexTemplateMetaData indexTemplateMetaData : templatesResponse.getIndexTemplates()) {
+ templateNames.add(indexTemplateMetaData.name());
+ }
+ assertThat(templateNames, hasItem(name));
+ }
+
+ /**
+ * Assert that aliases are missing
+ */
+ public static void assertAliasesMissing(AliasesExistResponse aliasesExistResponse) {
+ assertFalse("Aliases shouldn't exist", aliasesExistResponse.exists());
+ }
+
+ /**
+ * Assert that aliases exist
+ */
+ public static void assertAliasesExist(AliasesExistResponse aliasesExistResponse) {
+ assertTrue("Aliases should exist", aliasesExistResponse.exists());
+ }
+
+ /*
+ * matchers
+ */
+ public static Matcher<SearchHit> hasId(final String id) {
+ return new ElasticsearchMatchers.SearchHitHasIdMatcher(id);
+ }
+
+ public static Matcher<SearchHit> hasType(final String type) {
+ return new ElasticsearchMatchers.SearchHitHasTypeMatcher(type);
+ }
+
+ public static Matcher<SearchHit> hasIndex(final String index) {
+ return new ElasticsearchMatchers.SearchHitHasIndexMatcher(index);
+ }
+
+ public static Matcher<SearchHit> hasScore(final float score) {
+ return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score);
+ }
+
+ public static Matcher<HttpResponse> hasStatus(RestStatus restStatus) {
+ return new ElasticsearchMatchers.HttpResponseHasStatusMatcher(restStatus);
+ }
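+ // Illustrative: the matchers above compose with the hit assertions, e.g.
+ //   assertFirstHit(searchResponse, hasId("1"));
+ //   assertSearchHit(hit, hasScore(1.0f));
+ // (searchResponse and hit are assumptions)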
+
+ public static <T extends Query> T assertBooleanSubQuery(Query query, Class<T> subqueryType, int i) {
+ assertThat(query, instanceOf(BooleanQuery.class));
+ BooleanQuery q = (BooleanQuery) query;
+ assertThat(q.getClauses().length, greaterThan(i));
+ assertThat(q.getClauses()[i].getQuery(), instanceOf(subqueryType));
+ return (T) q.getClauses()[i].getQuery();
+ }
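+ // Illustrative usage of assertBooleanSubQuery above (parsedQuery is an assumption; any BooleanQuery works):
+ //   TermQuery term = assertBooleanSubQuery(parsedQuery, TermQuery.class, 0);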
+
+ /**
+ * Run the request from a given builder and check that it throws an exception of the right type
+ */
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass) {
+ assertThrows(builder.execute(), exceptionClass);
+ }
+
+ /**
+ * Run the request from a given builder and check that it throws an exception of the right type, with a given {@link org.elasticsearch.rest.RestStatus}
+ */
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass, RestStatus status) {
+ assertThrows(builder.execute(), exceptionClass, status);
+ }
+
+ /**
+ * Run the request from a given builder and check that it throws an exception of the right type
+ *
+ * @param extraInfo extra information to add to the failure message
+ */
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, Class<E> exceptionClass, String extraInfo) {
+ assertThrows(builder.execute(), exceptionClass, extraInfo);
+ }
+
+ /**
+ * Run future.actionGet() and check that it throws an exception of the right type
+ */
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass) {
+ assertThrows(future, exceptionClass, null, null);
+ }
+
+ /**
+ * Run future.actionGet() and check that it throws an exception of the right type, with a given {@link org.elasticsearch.rest.RestStatus}
+ */
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, RestStatus status) {
+ assertThrows(future, exceptionClass, status, null);
+ }
+
+ /**
+ * Run future.actionGet() and check that it throws an exception of the right type
+ *
+ * @param extraInfo extra information to add to the failure message
+ */
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, String extraInfo) {
+ assertThrows(future, exceptionClass, null, extraInfo);
+ }
+
+ /**
+ * Run future.actionGet() and check that it throws an exception of the right type, optionally checking the exception's REST status
+ *
+ * @param exceptionClass expected exception class
+ * @param status {@link org.elasticsearch.rest.RestStatus} to check for. Can be null to disable the check
+ * @param extraInfo extra information to add to the failure message. Can be null.
+ */
+ public static <E extends Throwable> void assertThrows(ActionFuture future, Class<E> exceptionClass, @Nullable RestStatus status, @Nullable String extraInfo) {
+ boolean fail = false;
+ extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
+ extraInfo += "expected a " + exceptionClass + " exception to be thrown";
+
+ if (status != null) {
+ extraInfo += " with status [" + status + "]";
+ }
+
+ try {
+ future.actionGet();
+ fail = true;
+
+ } catch (ElasticsearchException esException) {
+ assertThat(extraInfo, esException.unwrapCause(), instanceOf(exceptionClass));
+ if (status != null) {
+ assertThat(extraInfo, ExceptionsHelper.status(esException), equalTo(status));
+ }
+ } catch (Throwable e) {
+ assertThat(extraInfo, e, instanceOf(exceptionClass));
+ if (status != null) {
+ assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status));
+ }
+ }
+ // has to be outside catch clause to get a proper message
+ if (fail) {
+ throw new AssertionError(extraInfo);
+ }
+ }
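+ // Illustrative usage of the assertThrows variants above (index name and exception type are assumptions):
+ //   assertThrows(client().prepareGet("missing", "type", "1"), IndexNotFoundException.class, RestStatus.NOT_FOUND);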
+
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, RestStatus status) {
+ assertThrows(builder.execute(), status);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionRequestBuilder<?, ?, ?> builder, RestStatus status, String extraInfo) {
+ assertThrows(builder.execute(), status, extraInfo);
+ }
+
+ public static <E extends Throwable> void assertThrows(ActionFuture future, RestStatus status) {
+ assertThrows(future, status, null);
+ }
+
+ public static void assertThrows(ActionFuture future, RestStatus status, String extraInfo) {
+ boolean fail = false;
+ extraInfo = extraInfo == null || extraInfo.isEmpty() ? "" : extraInfo + ": ";
+ extraInfo += "expected a " + status + " status exception to be thrown";
+
+ try {
+ future.actionGet();
+ fail = true;
+ } catch (Throwable e) {
+ assertThat(extraInfo, ExceptionsHelper.status(e), equalTo(status));
+ }
+ // has to be outside catch clause to get a proper message
+ if (fail) {
+ throw new AssertionError(extraInfo);
+ }
+ }
+
+ private static BytesReference serialize(Version version, Streamable streamable) throws IOException {
+ BytesStreamOutput output = new BytesStreamOutput();
+ output.setVersion(version);
+ streamable.writeTo(output);
+ output.flush();
+ return output.bytes();
+ }
+
+ public static void assertVersionSerializable(Streamable streamable) {
+ assertTrue(Version.CURRENT.after(VersionUtils.getPreviousVersion()));
+ assertVersionSerializable(randomVersion(random()), streamable);
+ }
+
+ public static void assertVersionSerializable(Version version, Streamable streamable) {
+ assertVersionSerializable(version, streamable, null);
+ }
+
+ public static void assertVersionSerializable(Version version, Streamable streamable, NamedWriteableRegistry namedWriteableRegistry) {
+ try {
+ Streamable newInstance = tryCreateNewInstance(streamable);
+ if (newInstance == null) {
+ return; // can't create a new instance - we never modify a
+ // streamable that comes in.
+ }
+ if (streamable instanceof ActionRequest) {
+ ((ActionRequest<?>) streamable).validate();
+ }
+ BytesReference orig = serialize(version, streamable);
+ StreamInput input = StreamInput.wrap(orig);
+ if (namedWriteableRegistry != null) {
+ input = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry);
+ }
+ input.setVersion(version);
+ newInstance.readFrom(input);
+ assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(),
+ equalTo(0));
+ assertThat("Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable + "]",
+ serialize(version, streamable), equalTo(orig));
+ } catch (Throwable ex) {
+ throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex);
+ }
+ }
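+ // Illustrative usage of the version serialization check above (the wrapped exception is an assumption);
+ // the check serializes, reads into a fresh instance and re-serializes, requiring identical bytes:
+ //   assertVersionSerializable(Version.CURRENT, new RuntimeException("boom"));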
+
+ public static void assertVersionSerializable(Version version, final Throwable t) {
+ ElasticsearchAssertions.assertVersionSerializable(version, new ThrowableWrapper(t));
+ }
+
+ public static final class ThrowableWrapper implements Streamable {
+ Throwable throwable;
+ public ThrowableWrapper(Throwable t) {
+ throwable = t;
+ }
+
+ public ThrowableWrapper() {
+ throwable = null;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ throwable = in.readThrowable();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeThrowable(throwable);
+ }
+ }
+
+
+ private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException,
+ IllegalAccessException, InvocationTargetException {
+ try {
+ Class<? extends Streamable> clazz = streamable.getClass();
+ Constructor<? extends Streamable> constructor = clazz.getConstructor();
+ assertThat(constructor, Matchers.notNullValue());
+ Streamable newInstance = constructor.newInstance();
+ return newInstance;
+ } catch (Throwable e) {
+ return null;
+ }
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks that all shards were successful, that
+ * none of the shards threw an exception and that the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchRequestBuilder request) {
+ return assertSearchResponse(request.get());
+ }
+
+ /**
+ * Applies basic assertions on the SearchResponse. This method checks that all shards were successful, that
+ * none of the shards threw an exception and that the response is serializable.
+ */
+ public static SearchResponse assertSearchResponse(SearchResponse response) {
+ assertNoFailures(response);
+ return response;
+ }
+
+ public static void assertNodeContainsPlugins(NodesInfoResponse response, String nodeId,
+ List<String> expectedJvmPluginNames,
+ List<String> expectedJvmPluginDescriptions,
+ List<String> expectedJvmVersions,
+ List<String> expectedSitePluginNames,
+ List<String> expectedSitePluginDescriptions,
+ List<String> expectedSiteVersions) {
+
+ Assert.assertThat(response.getNodesMap().get(nodeId), notNullValue());
+
+ PluginsAndModules plugins = response.getNodesMap().get(nodeId).getPlugins();
+ Assert.assertThat(plugins, notNullValue());
+
+ List<String> pluginNames = filterAndMap(plugins, jvmPluginPredicate, nameFunction);
+ for (String expectedJvmPluginName : expectedJvmPluginNames) {
+ Assert.assertThat(pluginNames, hasItem(expectedJvmPluginName));
+ }
+
+ List<String> pluginDescriptions = filterAndMap(plugins, jvmPluginPredicate, descriptionFunction);
+ for (String expectedJvmPluginDescription : expectedJvmPluginDescriptions) {
+ Assert.assertThat(pluginDescriptions, hasItem(expectedJvmPluginDescription));
+ }
+
+ List<String> jvmPluginVersions = filterAndMap(plugins, jvmPluginPredicate, versionFunction);
+ for (String pluginVersion : expectedJvmVersions) {
+ Assert.assertThat(jvmPluginVersions, hasItem(pluginVersion));
+ }
+
+ boolean anyHaveUrls =
+ plugins
+ .getPluginInfos()
+ .stream()
+ .filter(jvmPluginPredicate.and(sitePluginPredicate.negate()))
+ .map(urlFunction)
+ .anyMatch(p -> p != null);
+ assertFalse(anyHaveUrls);
+
+ List<String> sitePluginNames = filterAndMap(plugins, sitePluginPredicate, nameFunction);
+
+ Assert.assertThat(sitePluginNames.isEmpty(), is(expectedSitePluginNames.isEmpty()));
+ for (String expectedSitePluginName : expectedSitePluginNames) {
+ Assert.assertThat(sitePluginNames, hasItem(expectedSitePluginName));
+ }
+
+ List<String> sitePluginDescriptions = filterAndMap(plugins, sitePluginPredicate, descriptionFunction);
+ Assert.assertThat(sitePluginDescriptions.isEmpty(), is(expectedSitePluginDescriptions.isEmpty()));
+ for (String sitePluginDescription : expectedSitePluginDescriptions) {
+ Assert.assertThat(sitePluginDescriptions, hasItem(sitePluginDescription));
+ }
+
+ List<String> sitePluginUrls = filterAndMap(plugins, sitePluginPredicate, urlFunction);
+ Assert.assertThat(sitePluginUrls, not(contains(nullValue())));
+
+ List<String> sitePluginVersions = filterAndMap(plugins, sitePluginPredicate, versionFunction);
+ Assert.assertThat(sitePluginVersions.isEmpty(), is(expectedSiteVersions.isEmpty()));
+ for (String pluginVersion : expectedSiteVersions) {
+ Assert.assertThat(sitePluginVersions, hasItem(pluginVersion));
+ }
+ }
+
+ private static List<String> filterAndMap(PluginsAndModules pluginsInfo, Predicate<PluginInfo> predicate, Function<PluginInfo, String> function) {
+ return pluginsInfo.getPluginInfos().stream().filter(predicate).map(function).collect(Collectors.toList());
+ }
+
+ private static Predicate<PluginInfo> jvmPluginPredicate = p -> p.isJvm();
+
+ private static Predicate<PluginInfo> sitePluginPredicate = p -> p.isSite();
+
+ private static Function<PluginInfo, String> nameFunction = p -> p.getName();
+
+ private static Function<PluginInfo, String> descriptionFunction = p -> p.getDescription();
+
+ private static Function<PluginInfo, String> urlFunction = p -> p.getUrl();
+
+ private static Function<PluginInfo, String> versionFunction = p -> p.getVersion();
+
+ /**
+ * Assert that a file exists
+ */
+ public static void assertFileExists(Path file) {
+ assertThat("file/dir [" + file + "] should exist.", Files.exists(file), is(true));
+ }
+
+ /**
+ * Assert that a file does not exist
+ */
+ public static void assertFileNotExists(Path file) {
+ assertThat("file/dir [" + file + "] should not exist.", Files.exists(file), is(false));
+ }
+
+ /**
+ * Assert that a directory exists
+ */
+ public static void assertDirectoryExists(Path dir) {
+ assertFileExists(dir);
+ assertThat("file [" + dir + "] should be a directory.", Files.isDirectory(dir), is(true));
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
new file mode 100644
index 0000000000..1853d291c6
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.hamcrest;
+
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.TypeSafeMatcher;
+
+public class ElasticsearchMatchers {
+
+ public static class SearchHitHasIdMatcher extends TypeSafeMatcher<SearchHit> {
+ private String id;
+
+ public SearchHitHasIdMatcher(String id) {
+ this.id = id;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getId().equals(id);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getId());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit id should be ").appendValue(id);
+ }
+ }
+
+ public static class SearchHitHasTypeMatcher extends TypeSafeMatcher<SearchHit> {
+ private String type;
+
+ public SearchHitHasTypeMatcher(String type) {
+ this.type = type;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getType().equals(type);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getType());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit type should be ").appendValue(type);
+ }
+ }
+
+ public static class SearchHitHasIndexMatcher extends TypeSafeMatcher<SearchHit> {
+ private String index;
+
+ public SearchHitHasIndexMatcher(String index) {
+ this.index = index;
+ }
+
+ @Override
+ public boolean matchesSafely(final SearchHit searchHit) {
+ return searchHit.getIndex().equals(index);
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getIndex());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit index should be ").appendValue(index);
+ }
+ }
+
+ public static class SearchHitHasScoreMatcher extends TypeSafeMatcher<SearchHit> {
+ private float score;
+
+ public SearchHitHasScoreMatcher(float score) {
+ this.score = score;
+ }
+
+ @Override
+ protected boolean matchesSafely(SearchHit searchHit) {
+ return searchHit.getScore() == score;
+ }
+
+ @Override
+ public void describeMismatchSafely(final SearchHit searchHit, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(searchHit.getScore());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("searchHit score should be ").appendValue(score);
+ }
+ }
+
+ public static class HttpResponseHasStatusMatcher extends TypeSafeMatcher<HttpResponse> {
+
+ private RestStatus restStatus;
+
+ public HttpResponseHasStatusMatcher(RestStatus restStatus) {
+ this.restStatus = restStatus;
+ }
+
+ @Override
+ protected boolean matchesSafely(HttpResponse response) {
+ return response.getStatusCode() == restStatus.getStatus();
+ }
+
+ @Override
+ public void describeMismatchSafely(final HttpResponse response, final Description mismatchDescription) {
+ mismatchDescription.appendText(" was ").appendValue(response.getStatusCode());
+ }
+
+ @Override
+ public void describeTo(final Description description) {
+ description.appendText("HTTP response status code should be ").appendValue(restStatus.getStatus());
+ }
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
new file mode 100644
index 0000000000..62c35e551c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+
+import java.util.regex.Pattern;
+
+/**
+ * Matcher that supports regular expressions and allows providing optional flags
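+ *
+ * <p>For example (patterns and inputs are illustrative):
+ * <pre>
+ * assertThat("Elasticsearch 2.1", RegexMatcher.matches("\\d+\\.\\d+"));
+ * assertThat("ERROR", RegexMatcher.matches("error", Pattern.CASE_INSENSITIVE));
+ * </pre>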
+ */
+public class RegexMatcher extends TypeSafeMatcher<String> {
+
+ private final String regex;
+ private final Pattern pattern;
+
+ public RegexMatcher(String regex) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex);
+ }
+
+ public RegexMatcher(String regex, int flag) {
+ this.regex = regex;
+ this.pattern = Pattern.compile(regex, flag);
+ }
+
+ @Override
+ protected boolean matchesSafely(String item) {
+ return pattern.matcher(item).find();
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(regex);
+ }
+
+ public static RegexMatcher matches(String regex) {
+ return new RegexMatcher(regex);
+ }
+
+ public static RegexMatcher matches(String regex, int flag) {
+ return new RegexMatcher(regex, flag);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java
new file mode 100644
index 0000000000..d2615eabca
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+
+import java.lang.annotation.*;
+
+/**
+ * Annotation used to indicate that internet connectivity is required to run the test.
+ * By default, tests annotated with @Network are not executed.
+ * Set -Dtests.network=true when running tests to enable them.
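+ *
+ * <p>A hypothetical usage sketch (test name is illustrative):
+ * <pre>
+ * &#64;Network
+ * public void testAgainstExternalService() throws Exception {
+ *     // talks to a real internet service; runs only with -Dtests.network=true
+ * }
+ * </pre>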
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Inherited
+@TestGroup(enabled = false, sysProperty = "tests.network")
+public @interface Network {
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
new file mode 100644
index 0000000000..e09cc7534e
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.annotations;
+
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+import static java.lang.annotation.ElementType.METHOD;
+import static java.lang.annotation.ElementType.PACKAGE;
+import static java.lang.annotation.ElementType.TYPE;
+
+/**
+ * Annotation used to set a custom log level for a specific test method, class or package.
+ *
+ * It supports multiple comma-separated logger:level pairs.
+ * Use the _root keyword to set the root logger level,
+ * e.g. @TestLogging("_root:DEBUG,org.elasticsearch.cluster.metadata:TRACE"),
+ * or just @TestLogging("_root:DEBUG,cluster.metadata:TRACE") since we start the test with -Des.logger.prefix=
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({PACKAGE, TYPE, METHOD})
+public @interface TestLogging {
+ String value();
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
new file mode 100644
index 0000000000..8237095b49
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.ESLoggerFactory;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.junit.annotations.TestLogging;
+import org.junit.runner.Description;
+import org.junit.runner.Result;
+import org.junit.runner.notification.RunListener;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@link RunListener} that allows changing the log level for a specific test method.
+ * When a test method is annotated with the {@link org.elasticsearch.test.junit.annotations.TestLogging} annotation, the levels of the specified loggers
+ * are saved before the test method executes and overridden with the specified ones.
+ * At the end of the test method execution the original logger levels are restored.
+ *
+ * Note: This class is not thread-safe. Given the static nature of the logging API, it assumes that tests
+ * are never run concurrently in the same JVM. For the very same reason no synchronization has been implemented
+ * for the save/restore of the original logger levels.
+ */
+public class LoggingListener extends RunListener {
+
+ private Map<String, String> previousLoggingMap;
+ private Map<String, String> previousClassLoggingMap;
+ private Map<String, String> previousPackageLoggingMap;
+
+ @Override
+ public void testRunStarted(Description description) throws Exception {
+ previousPackageLoggingMap = processTestLogging(description.getTestClass().getPackage().getAnnotation(TestLogging.class));
+ previousClassLoggingMap = processTestLogging(description.getAnnotation(TestLogging.class));
+ }
+
+ @Override
+ public void testRunFinished(Result result) throws Exception {
+ previousClassLoggingMap = reset(previousClassLoggingMap);
+ previousPackageLoggingMap = reset(previousPackageLoggingMap);
+ }
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ final TestLogging testLogging = description.getAnnotation(TestLogging.class);
+ previousLoggingMap = processTestLogging(testLogging);
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ previousLoggingMap = reset(previousLoggingMap);
+ }
+
+ private static ESLogger resolveLogger(String loggerName) {
+ if (loggerName.equalsIgnoreCase("_root")) {
+ return ESLoggerFactory.getRootLogger();
+ }
+ return Loggers.getLogger(loggerName);
+ }
+
+ private Map<String, String> processTestLogging(TestLogging testLogging) {
+ Map<String, String> map = getLoggersAndLevelsFromAnnotation(testLogging);
+ if (map == null) {
+ return null;
+ }
+ Map<String, String> previousValues = new HashMap<>();
+ for (Map.Entry<String, String> entry : map.entrySet()) {
+ ESLogger esLogger = resolveLogger(entry.getKey());
+ previousValues.put(entry.getKey(), esLogger.getLevel());
+ esLogger.setLevel(entry.getValue());
+ }
+ return previousValues;
+ }
+
+ public static Map<String, String> getLoggersAndLevelsFromAnnotation(TestLogging testLogging) {
+ if (testLogging == null) {
+ return null;
+ }
+ Map<String, String> map = new HashMap<>();
+ final String[] loggersAndLevels = testLogging.value().split(",");
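+ // e.g. "_root:DEBUG,index.shard:TRACE" yields {_root=DEBUG, index.shard=TRACE};
+ // entries without a ':' separator are silently skipped by the length check below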
+ for (String loggerAndLevel : loggersAndLevels) {
+ String[] loggerAndLevelArray = loggerAndLevel.split(":");
+ if (loggerAndLevelArray.length >= 2) {
+ String loggerName = loggerAndLevelArray[0];
+ String level = loggerAndLevelArray[1];
+ map.put(loggerName, level);
+ }
+ }
+ return map;
+ }
+
+ private Map<String, String> reset(Map<String, String> map) {
+ if (map != null) {
+ for (Map.Entry<String, String> previousLogger : map.entrySet()) {
+ ESLogger esLogger = resolveLogger(previousLogger.getKey());
+ esLogger.setLevel(previousLogger.getValue());
+ }
+ }
+ return null;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
new file mode 100644
index 0000000000..47a77dfc9d
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.listeners;
+
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder;
+import com.carrotsearch.randomizedtesting.TraceFormatting;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.internal.AssumptionViolatedException;
+import org.junit.runner.Description;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+
+import java.util.Locale;
+import java.util.TimeZone;
+
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_ITERATIONS;
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_PREFIX;
+import static com.carrotsearch.randomizedtesting.SysGlobals.SYSPROP_TESTMETHOD;
+import static org.elasticsearch.test.ESIntegTestCase.TESTS_CLUSTER;
+import static org.elasticsearch.test.rest.ESRestTestCase.REST_TESTS_BLACKLIST;
+import static org.elasticsearch.test.rest.ESRestTestCase.REST_TESTS_SPEC;
+import static org.elasticsearch.test.rest.ESRestTestCase.REST_TESTS_SUITE;
+import static org.elasticsearch.test.rest.ESRestTestCase.Rest;
+
+/**
+ * A {@link RunListener} that emits to {@link System#err} a string with command
+ * line parameters allowing a quick test re-run from the Gradle command line.
+ */
+public class ReproduceInfoPrinter extends RunListener {
+
+ protected final ESLogger logger = Loggers.getLogger(ESTestCase.class);
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ logger.trace("Test {} started", description.getDisplayName());
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ logger.trace("Test {} finished", description.getDisplayName());
+ }
+
+ /**
+ * true if we are running in the integration test verify phase
+ */
+ static boolean inVerifyPhase() {
+ return Boolean.parseBoolean(System.getProperty("tests.verify.phase"));
+ }
+
+ @Override
+ public void testFailure(Failure failure) throws Exception {
+ // Ignore assumptions.
+ if (failure.getException() instanceof AssumptionViolatedException) {
+ return;
+ }
+
+ final StringBuilder b = new StringBuilder("REPRODUCE WITH: gradle ");
+ String task = System.getProperty("tests.task");
+ // TODO: enforce (intellij still runs the runner?) or use default "test" but that won't work for integ
+ b.append(task);
+
+ GradleMessageBuilder gradleMessageBuilder = new GradleMessageBuilder(b);
+ gradleMessageBuilder.appendAllOpts(failure.getDescription());
+
+ // REST tests are a special case as they allow for additional parameters
+ if (failure.getDescription().getTestClass().isAnnotationPresent(Rest.class)) {
+ gradleMessageBuilder.appendRestTestsProperties();
+ }
+
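+ // Example output (all values illustrative):
+ //   REPRODUCE WITH: gradle test -Dtests.seed=AB12CD34 -Dtests.class=org.elasticsearch.test.SomeTests -Dtests.method="testSomething" -Dtests.locale=en -Dtests.timezone=UTC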
+ System.err.println(b.toString());
+ }
+
+ protected static class GradleMessageBuilder extends ReproduceErrorMessageBuilder {
+
+ public GradleMessageBuilder(StringBuilder b) {
+ super(b);
+ }
+
+ @Override
+ public ReproduceErrorMessageBuilder appendAllOpts(Description description) {
+ super.appendAllOpts(description);
+
+ if (description.getMethodName() != null) {
+ //prints out the raw method description instead of methodName(description) which filters out the parameters
+ super.appendOpt(SYSPROP_TESTMETHOD(), "\"" + description.getMethodName() + "\"");
+ }
+
+ return appendESProperties();
+ }
+
+ @Override
+ public ReproduceErrorMessageBuilder appendEnvironmentSettings() {
+ // we handle our own environment settings
+ return this;
+ }
+
+ /**
+ * Append a single VM option.
+ */
+ @Override
+ public ReproduceErrorMessageBuilder appendOpt(String sysPropName, String value) {
+ if (sysPropName.equals(SYSPROP_ITERATIONS())) { // we don't want the iters to be in there!
+ return this;
+ }
+ if (sysPropName.equals(SYSPROP_TESTMETHOD())) {
+ //don't print out the test method, we print it ourselves in appendAllOpts
+ //without filtering out the parameters (needed for REST tests)
+ return this;
+ }
+ if (sysPropName.equals(SYSPROP_PREFIX())) {
+ // we always use the default prefix
+ return this;
+ }
+ if (Strings.hasLength(value)) {
+ return super.appendOpt(sysPropName, value);
+ }
+ return this;
+ }
+
+ public ReproduceErrorMessageBuilder appendESProperties() {
+ appendProperties("es.logger.level");
+ if (inVerifyPhase()) {
+ // these properties only make sense for integration tests
+ appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, ESIntegTestCase.TESTS_ENABLE_MOCK_MODULES);
+ }
+ appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms",
+ "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version");
+ if (System.getProperty("tests.jvm.argline") != null && !System.getProperty("tests.jvm.argline").isEmpty()) {
+ appendOpt("tests.jvm.argline", "\"" + System.getProperty("tests.jvm.argline") + "\"");
+ }
+ appendOpt("tests.locale", Locale.getDefault().toString());
+ appendOpt("tests.timezone", TimeZone.getDefault().getID());
+ return this;
+ }
+
+ public ReproduceErrorMessageBuilder appendRestTestsProperties() {
+ return appendProperties(REST_TESTS_SUITE, REST_TESTS_SPEC, REST_TESTS_BLACKLIST);
+ }
+
+ protected ReproduceErrorMessageBuilder appendProperties(String... properties) {
+ for (String sysPropName : properties) {
+ if (Strings.hasLength(System.getProperty(sysPropName))) {
+ appendOpt(sysPropName, System.getProperty(sysPropName));
+ }
+ }
+ return this;
+ }
+
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java b/test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
new file mode 100644
index 0000000000..7ded36f380
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.junit.rule;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.junit.rules.TestRule;
+import org.junit.runner.Description;
+import org.junit.runners.model.Statement;
+
+/**
+ * A helper rule to catch all BindTransportExceptions
+ * and rerun the test a configured number of times.
+ *
+ * Note: be aware that when a test is repeated, the @After and @Before
+ * annotated methods are not run a second time.
+ *
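+ * <p>A hypothetical usage sketch (retry count and exception type are illustrative):
+ * <pre>
+ * &#64;Rule
+ * public RepeatOnExceptionRule repeatRule = new RepeatOnExceptionRule(logger, 3, BindTransportException.class);
+ * </pre>
+ *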
+ */
+public class RepeatOnExceptionRule implements TestRule {
+
+ private ESLogger logger;
+ private int retryCount;
+ private Class expectedException;
+
+ /**
+ *
+ * @param logger the es logger from the test class
+ * @param retryCount number of times to retry a single test before failing
+ * @param expectedException The exception class you want to catch
+ *
+ */
+ public RepeatOnExceptionRule(ESLogger logger, int retryCount, Class expectedException) {
+ this.logger = logger;
+ this.retryCount = retryCount;
+ this.expectedException = expectedException;
+ }
+
+ @Override
+ public Statement apply(final Statement base, Description description) {
+
+ return new Statement() {
+ @Override
+ public void evaluate() throws Throwable {
+ Throwable caughtThrowable = null;
+
+ for (int i = 0; i < retryCount; i++) {
+ try {
+ base.evaluate();
+ return;
+ } catch (Throwable t) {
+ if (t.getClass().equals(expectedException)) {
+ caughtThrowable = t;
+ logger.info("Exception [{}] occurred, rerunning the test after [{}] failures", t, t.getClass().getSimpleName(), i+1);
+ } else {
+ throw t;
+ }
+ }
+ }
+ logger.error("Giving up after [{}] failures... marking test as failed", retryCount);
+ throw caughtThrowable;
+ }
+ };
+
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java
new file mode 100644
index 0000000000..e5bb75955c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import java.util.regex.Pattern;
+
+/**
+ * Matches blacklist patterns.
+ *
+ * Currently the following syntax is supported:
+ *
+ * <ul>
+ * <li>Exact matches, as in <code>cat.aliases/10_basic/Empty cluster</code></li>
+ * <li>Wildcard matches within the same segment of a path, as in <code>indices.get/10_basic/*allow_no_indices*</code>. This will
+ * match <code>indices.get/10_basic/allow_no_indices</code> and <code>indices.get/10_basic/allow_no_indices_at_all</code> but not
+ * <code>indices.get/10_basic/advanced/allow_no_indices</code> (contains an additional segment)</li>
+ * </ul>
+ *
+ * Each blacklist pattern is a suffix match on the path. Empty patterns are not allowed.
+ */
+final class BlacklistedPathPatternMatcher {
+ private final Pattern pattern;
+
+ /**
+ * Constructs a new <code>BlacklistedPathPatternMatcher</code> instance from the provided suffix pattern.
+ *
+ * @param p The suffix pattern. Must be a non-empty string.
+ */
+ BlacklistedPathPatternMatcher(String p) {
+ // guard against accidentally matching everything, as an empty string would lead to the pattern ".*" which matches everything
+ if (p == null || p.trim().isEmpty()) {
+ throw new IllegalArgumentException("Empty blacklist patterns are not supported");
+ }
+ // very simple transformation from wildcard to a proper regex
+ String finalPattern = p
+ .replaceAll("\\*", "[^/]*") // support wildcard matches (within a single path segment)
+ .replaceAll("\\\\,", ","); // restore previously escaped ',' in paths.
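+ // e.g. "indices.get/10_basic/*allow_no_indices*" becomes
+ // "indices.get/10_basic/[^/]*allow_no_indices[^/]*" (a ".*" prefix is prepended below for the suffix match)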
+
+ // suffix match
+ pattern = Pattern.compile(".*" + finalPattern);
+ }
+
+ /**
+ * Checks whether the provided path matches the suffix pattern, i.e. "/foo/bar" will match the pattern "bar".
+ *
+ * @param path The path to match. Must not be null.
+ * @return true iff this path is a suffix match.
+ */
+ public boolean isSuffixMatch(String path) {
+ return pattern.matcher(path).matches();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
new file mode 100644
index 0000000000..266f8e8038
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -0,0 +1,374 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
+import org.apache.lucene.util.LuceneTestCase.SuppressFsync;
+import org.apache.lucene.util.TimeUnits;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.node.Node;
+import org.elasticsearch.repositories.uri.URLRepository;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+import org.elasticsearch.test.rest.parser.RestTestSuiteParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.ExecutableSection;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.SkipSection;
+import org.elasticsearch.test.rest.section.TestSection;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestSpec;
+import org.elasticsearch.test.rest.support.FileUtils;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.nio.file.FileSystem;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Runs the clients' test suite against an elasticsearch cluster.
+ */
+@ESRestTestCase.Rest
+@SuppressFsync // we aren't trying to test this here, and it can make the test slow
+@SuppressCodecs("*") // requires custom completion postings format
+@ClusterScope(randomDynamicTemplates = false)
+@TimeoutSuite(millis = 40 * TimeUnits.MINUTE) // timeout the suite after 40min and fail the test.
+public abstract class ESRestTestCase extends ESIntegTestCase {
+
+ /**
+ * Property that controls whether the REST tests are run (default) or not
+ */
+ public static final String TESTS_REST = "tests.rest";
+
+ /**
+ * Annotation for REST tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = true, sysProperty = ESRestTestCase.TESTS_REST)
+ public @interface Rest {
+ }
+
+ /**
+ * Property that controls which REST tests get run. Supports a comma-separated list of tests
+ * or directories that contain tests, e.g. -Dtests.rest.suite=index,get,create/10_with_id
+ */
+ public static final String REST_TESTS_SUITE = "tests.rest.suite";
+ /**
+ * Property that allows blacklisting some of the REST tests based on a comma-separated list of globs,
+ * e.g. -Dtests.rest.blacklist=get/10_basic/*
+ */
+ public static final String REST_TESTS_BLACKLIST = "tests.rest.blacklist";
+ /**
+ * Property that controls whether spec validation is enabled (default true).
+ */
+ public static final String REST_TESTS_VALIDATE_SPEC = "tests.rest.validate_spec";
+ /**
+ * Property that controls where the REST spec files are loaded from
+ */
+ public static final String REST_TESTS_SPEC = "tests.rest.spec";
+
+ public static final String REST_LOAD_PACKAGED_TESTS = "tests.rest.load_packaged";
+
+ private static final String DEFAULT_TESTS_PATH = "/rest-api-spec/test";
+ private static final String DEFAULT_SPEC_PATH = "/rest-api-spec/api";
+
+ /**
+ * This separator pattern matches ',' unless it is preceded by a '\'. This allows us to support ',' within paths when it is
+ * escaped with a backslash.
+ *
+ * For example, the path string "/a/b/c\,d/e/f,/foo/bar,/baz" is split into "/a/b/c\,d/e/f", "/foo/bar" and "/baz".
+ *
+ * For reference, this regular expression feature is known as a zero-width negative lookbehind.
+ *
+ */
+ private static final String PATHS_SEPARATOR = "(?<!\\\\),";
+
+ private final List<BlacklistedPathPatternMatcher> blacklistPathMatchers = new ArrayList<>();
+ private static RestTestExecutionContext restTestExecutionContext;
+
+ private final RestTestCandidate testCandidate;
+
+ public ESRestTestCase(RestTestCandidate testCandidate) {
+ this.testCandidate = testCandidate;
+ String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null);
+ for (String entry : blacklist) {
+ this.blacklistPathMatchers.add(new BlacklistedPathPatternMatcher(entry));
+ }
+ }
+
+ @Override
+ protected void afterIfFailed(List<Throwable> errors) {
+ logger.info("Stash dump on failure [{}]", XContentHelper.toString(restTestExecutionContext.stash()));
+ super.afterIfFailed(errors);
+ }
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return Settings.builder()
+ .putArray(URLRepository.ALLOWED_URLS_SETTING, "http://snapshot.test*")
+ .put(Node.HTTP_ENABLED, true)
+ .put("node.testattr", "test")
+ .put(super.nodeSettings(nodeOrdinal)).build();
+ }
+
+ public static Iterable<Object[]> createParameters(int id, int count) throws IOException, RestTestParseException {
+ TestGroup testGroup = Rest.class.getAnnotation(TestGroup.class);
+ String sysProperty = TestGroup.Utilities.getSysProperty(Rest.class);
+ boolean enabled;
+ try {
+ enabled = RandomizedTest.systemPropertyAsBoolean(sysProperty, testGroup.enabled());
+ } catch (IllegalArgumentException e) {
+ // Ignore a malformed system property, but disable the group in that case.
+ enabled = false;
+ }
+ if (!enabled) {
+ return new ArrayList<>();
+ }
+ //parse tests only if the REST test group is enabled, otherwise the REST tests might not even be available on the file system
+ List<RestTestCandidate> restTestCandidates = collectTestCandidates(id, count);
+ List<Object[]> objects = new ArrayList<>();
+ for (RestTestCandidate restTestCandidate : restTestCandidates) {
+ objects.add(new Object[]{restTestCandidate});
+ }
+ return objects;
+ }
+
+ private static List<RestTestCandidate> collectTestCandidates(int id, int count) throws RestTestParseException, IOException {
+ List<RestTestCandidate> testCandidates = new ArrayList<>();
+ FileSystem fileSystem = getFileSystem();
+ // don't make this a try-with-resources block: getFileSystem may return null,
+ // ... and you can't close() the default filesystem
+ try {
+ String[] paths = resolvePathsProperty(REST_TESTS_SUITE, DEFAULT_TESTS_PATH);
+ Map<String, Set<Path>> yamlSuites = FileUtils.findYamlSuites(fileSystem, DEFAULT_TESTS_PATH, paths);
+ RestTestSuiteParser restTestSuiteParser = new RestTestSuiteParser();
+ //yaml suites are grouped by directory (effectively by api)
+ for (String api : yamlSuites.keySet()) {
+ List<Path> yamlFiles = new ArrayList<>(yamlSuites.get(api));
+ for (Path yamlFile : yamlFiles) {
+ String key = api + yamlFile.getFileName().toString();
+ if (mustExecute(key, id, count)) {
+ RestTestSuite restTestSuite = restTestSuiteParser.parse(api, yamlFile);
+ for (TestSection testSection : restTestSuite.getTestSections()) {
+ testCandidates.add(new RestTestCandidate(restTestSuite, testSection));
+ }
+ }
+ }
+ }
+ } finally {
+ IOUtils.close(fileSystem);
+ }
+
+ //sort the candidates so they will always be in the same order before being shuffled, for repeatability
+ Collections.sort(testCandidates, new Comparator<RestTestCandidate>() {
+ @Override
+ public int compare(RestTestCandidate o1, RestTestCandidate o2) {
+ return o1.getTestPath().compareTo(o2.getTestPath());
+ }
+ });
+
+ return testCandidates;
+ }
+
+ private static boolean mustExecute(String test, int id, int count) {
+ int hash = (int) (Math.abs((long)test.hashCode()) % count);
+ return hash == id;
+ }
+
+ private static String[] resolvePathsProperty(String propertyName, String defaultValue) {
+ String property = System.getProperty(propertyName);
+ if (!Strings.hasLength(property)) {
+ return defaultValue == null ? Strings.EMPTY_ARRAY : new String[]{defaultValue};
+ } else {
+ return property.split(PATHS_SEPARATOR);
+ }
+ }
+
+ /**
+ * Returns a new FileSystem to read REST resources, or null if they
+ * are available from the classpath.
+ */
+ @SuppressForbidden(reason = "proper use of URL, hack around a JDK bug")
+ static FileSystem getFileSystem() throws IOException {
+ // REST suite handling is currently complicated, with lots of filtering and so on
+ // For now, to work embedded in a jar, return a ZipFileSystem over the jar contents.
+ URL codeLocation = FileUtils.class.getProtectionDomain().getCodeSource().getLocation();
+ boolean loadPackaged = RandomizedTest.systemPropertyAsBoolean(REST_LOAD_PACKAGED_TESTS, true);
+ if (codeLocation.getFile().endsWith(".jar") && loadPackaged) {
+ try {
+ // hack around a bug in the zipfilesystem implementation before java 9:
+ // its checkWritable was incorrect and it won't work without write permissions.
+ // if we add the permission, it will open jars r/w, which is too scary! so we copy the jar to a safe r/w location instead.
+ Path tmp = Files.createTempFile(null, ".jar");
+ try (InputStream in = codeLocation.openStream()) {
+ Files.copy(in, tmp, StandardCopyOption.REPLACE_EXISTING);
+ }
+ return FileSystems.newFileSystem(new URI("jar:" + tmp.toUri()), Collections.<String,Object>emptyMap());
+ } catch (URISyntaxException e) {
+ throw new IOException("couldn't open zipfilesystem", e);
+ }
+ } else {
+ return null;
+ }
+ }
+
+ @BeforeClass
+ public static void initExecutionContext() throws IOException, RestException {
+ String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH);
+ RestSpec restSpec = null;
+ FileSystem fileSystem = getFileSystem();
+ // don't make this a try-with-resources block: getFileSystem may return null,
+ // ... and you can't close() the default filesystem
+ try {
+ restSpec = RestSpec.parseFrom(fileSystem, DEFAULT_SPEC_PATH, specPaths);
+ } finally {
+ IOUtils.close(fileSystem);
+ }
+ validateSpec(restSpec);
+ restTestExecutionContext = new RestTestExecutionContext(restSpec);
+ }
+
+ private static void validateSpec(RestSpec restSpec) {
+ boolean validateSpec = RandomizedTest.systemPropertyAsBoolean(REST_TESTS_VALIDATE_SPEC, true);
+ if (validateSpec) {
+ StringBuilder errorMessage = new StringBuilder();
+ for (RestApi restApi : restSpec.getApis()) {
+ if (restApi.getMethods().contains("GET") && restApi.isBodySupported()) {
+ if (!restApi.getMethods().contains("POST")) {
+ errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support POST");
+ }
+ }
+ }
+ if (errorMessage.length() > 0) {
+ throw new IllegalArgumentException(errorMessage.toString());
+ }
+ }
+ }
+
+ @AfterClass
+ public static void close() {
+ if (restTestExecutionContext != null) {
+ restTestExecutionContext.close();
+ restTestExecutionContext = null;
+ }
+ }
+
+ @Override
+ protected int maximumNumberOfShards() {
+ return 3; // never go crazy in the REST tests
+ }
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ // hardcoded to 1 since this is what clients also do and our tests must expect that we have only one node;
+ // with replicas set to 1 the cluster won't be green
+ return 1;
+ }
+
+ /**
+ * Used to obtain settings for the REST client that is used to send REST requests.
+ */
+ protected Settings restClientSettings() {
+ return Settings.EMPTY;
+ }
+
+ @Before
+ public void reset() throws IOException, RestException {
+ //skip test if it matches one of the blacklist globs
+ for (BlacklistedPathPatternMatcher blacklistedPathMatcher : blacklistPathMatchers) {
+ String testPath = testCandidate.getSuitePath() + "/" + testCandidate.getTestSection().getName();
+ assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.isSuffixMatch(testPath));
+ }
+ //The client needs non-static info to get initialized, therefore it can't be initialized in @BeforeClass
+ restTestExecutionContext.initClient(cluster().httpAddresses(), restClientSettings());
+ restTestExecutionContext.clear();
+
+ //skip test if the whole suite (yaml file) is disabled
+ assumeFalse(buildSkipMessage(testCandidate.getSuitePath(), testCandidate.getSetupSection().getSkipSection()),
+ testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion()));
+ //skip test if test section is disabled
+ assumeFalse(buildSkipMessage(testCandidate.getTestPath(), testCandidate.getTestSection().getSkipSection()),
+ testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion()));
+ }
+
+ private static String buildSkipMessage(String description, SkipSection skipSection) {
+ StringBuilder messageBuilder = new StringBuilder();
+ if (skipSection.isVersionCheck()) {
+ messageBuilder.append("[").append(description).append("] skipped, reason: [").append(skipSection.getReason()).append("] ");
+ } else {
+ messageBuilder.append("[").append(description).append("] skipped, reason: features ").append(skipSection.getFeatures()).append(" not supported");
+ }
+ return messageBuilder.toString();
+ }
+
+ public void test() throws IOException {
+ //let's check that there is something to run, otherwise there might be a problem with the test section
+ if (testCandidate.getTestSection().getExecutableSections().size() == 0) {
+ throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]");
+ }
+
+ if (!testCandidate.getSetupSection().isEmpty()) {
+ logger.info("start setup test [{}]", testCandidate.getTestPath());
+ for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
+ doSection.execute(restTestExecutionContext);
+ }
+ logger.info("end setup test [{}]", testCandidate.getTestPath());
+ }
+
+ restTestExecutionContext.clear();
+
+ for (ExecutableSection executableSection : testCandidate.getTestSection().getExecutableSections()) {
+ executableSection.execute(restTestExecutionContext);
+ }
+ }
+}
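
As a sketch of how a concrete suite is typically wired up, assuming the @ParametersFactory mechanism from randomizedtesting (the class name here is made up; createParameters' id/count arguments shard the yaml suites across classes):

    import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

    // hypothetical concrete suite: shard 0 of 1, i.e. it runs all candidates
    public class ExampleRestIT extends ESRestTestCase {

        public ExampleRestIT(RestTestCandidate testCandidate) {
            super(testCandidate);
        }

        @ParametersFactory
        public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
            return ESRestTestCase.createParameters(0, 1);
        }
    }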
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
new file mode 100644
index 0000000000..a24869b40b
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.rest.RestRequest;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class FakeRestRequest extends RestRequest {
+
+ private final Map<String, String> headers;
+
+ private final Map<String, String> params;
+
+ public FakeRestRequest() {
+ this(new HashMap<String, String>(), new HashMap<String, String>());
+ }
+
+ public FakeRestRequest(Map<String, String> headers, Map<String, String> context) {
+ this.headers = headers;
+ for (Map.Entry<String, String> entry : context.entrySet()) {
+ putInContext(entry.getKey(), entry.getValue());
+ }
+ this.params = new HashMap<>();
+ }
+
+ @Override
+ public Method method() {
+ return Method.GET;
+ }
+
+ @Override
+ public String uri() {
+ return "/";
+ }
+
+ @Override
+ public String rawPath() {
+ return "/";
+ }
+
+ @Override
+ public boolean hasContent() {
+ return false;
+ }
+
+ @Override
+ public BytesReference content() {
+ return null;
+ }
+
+ @Override
+ public String header(String name) {
+ return headers.get(name);
+ }
+
+ @Override
+ public Iterable<Map.Entry<String, String>> headers() {
+ return headers.entrySet();
+ }
+
+ @Override
+ public boolean hasParam(String key) {
+ return params.containsKey(key);
+ }
+
+ @Override
+ public String param(String key) {
+ return params.get(key);
+ }
+
+ @Override
+ public String param(String key, String defaultValue) {
+ String value = params.get(key);
+ if (value == null) {
+ return defaultValue;
+ }
+ return value;
+ }
+
+ @Override
+ public Map<String, String> params() {
+ return params;
+ }
+} \ No newline at end of file
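
A short usage sketch for FakeRestRequest; the header name and values are invented:

    Map<String, String> headers = new HashMap<>();
    headers.put("X-Test-Header", "some-value"); // hypothetical header

    FakeRestRequest request = new FakeRestRequest(headers, new HashMap<String, String>());
    assert request.method() == RestRequest.Method.GET;              // always GET
    assert "some-value".equals(request.header("X-Test-Header"));
    assert "fallback".equals(request.param("missing", "fallback")); // params start out empty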
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java
new file mode 100644
index 0000000000..e454c396a3
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.elasticsearch.test.rest.section.TestSection;
+
+/**
+ * Wraps {@link org.elasticsearch.test.rest.section.TestSection}s ready to be run.
+ * Each test section is associated to its {@link org.elasticsearch.test.rest.section.RestTestSuite}.
+ */
+public class RestTestCandidate {
+
+ private final RestTestSuite restTestSuite;
+ private final TestSection testSection;
+
+ public RestTestCandidate(RestTestSuite restTestSuite, TestSection testSection) {
+ this.restTestSuite = restTestSuite;
+ this.testSection = testSection;
+ }
+
+ public String getApi() {
+ return restTestSuite.getApi();
+ }
+
+ public String getName() {
+ return restTestSuite.getName();
+ }
+
+ public String getSuitePath() {
+ return restTestSuite.getPath();
+ }
+
+ public String getTestPath() {
+ return restTestSuite.getPath() + "/" + testSection.getName();
+ }
+
+ public SetupSection getSetupSection() {
+ return restTestSuite.getSetupSection();
+ }
+
+ public TestSection getTestSection() {
+ return testSection;
+ }
+
+ @Override
+ public String toString() {
+ return getTestPath();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
new file mode 100644
index 0000000000..4054b8efce
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.test.rest.client.RestClient;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Execution context passed across the REST tests.
+ * Holds the REST client used to communicate with elasticsearch.
+ * Caches the last obtained test response and allows stashing parts of it in variables
+ * that can be used as input values in subsequent requests.
+ */
+public class RestTestExecutionContext implements Closeable {
+
+ private static final ESLogger logger = Loggers.getLogger(RestTestExecutionContext.class);
+
+ private final Stash stash = new Stash();
+
+ private final RestSpec restSpec;
+
+ private RestClient restClient;
+
+ private RestResponse response;
+
+ public RestTestExecutionContext(RestSpec restSpec) {
+ this.restSpec = restSpec;
+ }
+
+ /**
+ * Calls an elasticsearch api with the parameters and request body provided as arguments.
+ * Saves the obtained response in the execution context.
+ * @throws RestException if the returned status code is not OK
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies,
+ Map<String, String> headers) throws IOException, RestException {
+ //makes a copy of the parameters before modifying them for this specific request
+ HashMap<String, String> requestParams = new HashMap<>(params);
+ for (Map.Entry<String, String> entry : requestParams.entrySet()) {
+ if (stash.isStashedValue(entry.getValue())) {
+ entry.setValue(stash.unstashValue(entry.getValue()).toString());
+ }
+ }
+
+ String body = actualBody(bodies);
+
+ try {
+ response = callApiInternal(apiName, requestParams, body, headers);
+ //we always stash the last response body
+ stash.stashValue("body", response.getBody());
+ return response;
+ } catch(RestException e) {
+ response = e.restResponse();
+ throw e;
+ }
+ }
+
+ private String actualBody(List<Map<String, Object>> bodies) throws IOException {
+ if (bodies.isEmpty()) {
+ return "";
+ }
+
+ if (bodies.size() == 1) {
+ return bodyAsString(stash.unstashMap(bodies.get(0)));
+ }
+
+ StringBuilder bodyBuilder = new StringBuilder();
+ for (Map<String, Object> body : bodies) {
+ bodyBuilder.append(bodyAsString(stash.unstashMap(body))).append("\n");
+ }
+ return bodyBuilder.toString();
+ }
+
+ private String bodyAsString(Map<String, Object> body) throws IOException {
+ return XContentFactory.jsonBuilder().map(body).string();
+ }
+
+ private RestResponse callApiInternal(String apiName, Map<String, String> params, String body, Map<String, String> headers) throws IOException, RestException {
+ return restClient.callApi(apiName, params, body, headers);
+ }
+
+ /**
+ * Extracts a specific value from the last saved response
+ */
+ public Object response(String path) throws IOException {
+ return response.evaluate(path, stash);
+ }
+
+ /**
+ * Creates the embedded REST client when needed. Needs to be called before each test.
+ */
+ public void initClient(InetSocketAddress[] addresses, Settings settings) throws IOException, RestException {
+ if (restClient == null) {
+ restClient = new RestClient(restSpec, settings, addresses);
+ }
+ }
+
+ /**
+ * Clears the last obtained response and the stashed fields
+ */
+ public void clear() {
+ logger.debug("resetting client, response and stash");
+ response = null;
+ stash.clear();
+ }
+
+ public Stash stash() {
+ return stash;
+ }
+
+ /**
+ * Returns the version of the elasticsearch nodes the client is connected to
+ */
+ public Version esVersion() {
+ return restClient.getEsVersion();
+ }
+
+ /**
+ * Closes the execution context and releases the underlying resources
+ */
+ @Override
+ public void close() {
+ if (restClient != null) {
+ restClient.close();
+ }
+ }
+}
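
To illustrate the stash round-trip described above, a hedged sketch; restSpec, the address and the stashed value are placeholders, and setup/teardown are elided:

    RestTestExecutionContext context = new RestTestExecutionContext(restSpec);
    context.initClient(new InetSocketAddress[] { new InetSocketAddress("localhost", 9200) }, Settings.EMPTY);

    // pretend a previous response stashed a scroll id
    context.stash().stashValue("scroll_id", "c2Nhbjs1OzE..."); // made-up value

    Map<String, String> params = new HashMap<>();
    params.put("scroll_id", "$scroll_id"); // '$'-prefixed values are resolved from the stash before the call
    context.callApi("scroll", params, Collections.<Map<String, Object>>emptyList(),
            Collections.<String, String>emptyMap());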
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java
new file mode 100644
index 0000000000..855cbb4d97
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Caches the last obtained test response and/or parts of it in variables
+ * that can be used as input values in subsequent requests and assertions.
+ */
+public class Stash implements ToXContent {
+
+ private static final ESLogger logger = Loggers.getLogger(Stash.class);
+
+ public static final Stash EMPTY = new Stash();
+
+ private final Map<String, Object> stash = new HashMap<>();
+
+ /**
+ * Saves a specific value in the stash as a key-value pair
+ */
+ public void stashValue(String key, Object value) {
+ logger.trace("stashing [{}]=[{}]", key, value);
+ Object old = stash.put(key, value);
+ if (old != null && old != value) {
+ logger.trace("replaced stashed value [{}] with same key [{}]", old, key);
+ }
+ }
+
+ /**
+ * Clears the previously stashed values
+ */
+ public void clear() {
+ stash.clear();
+ }
+
+ /**
+ * Tells whether a particular value needs to be looked up in the stash.
+ * The stash contains values previously extracted from responses that can be reused
+ * as arguments for subsequent requests (e.g. scroll_id)
+ */
+ public boolean isStashedValue(Object key) {
+ if (key == null) {
+ return false;
+ }
+ String stashKey = key.toString();
+ return Strings.hasLength(stashKey) && stashKey.startsWith("$");
+ }
+
+ /**
+ * Extracts a value from the current stash.
+ * The stash contains values previously extracted from responses that can be reused
+ * as arguments for subsequent requests (e.g. scroll_id)
+ */
+ public Object unstashValue(String value) {
+ Object stashedValue = stash.get(value.substring(1));
+ if (stashedValue == null) {
+ throw new IllegalArgumentException("stashed value not found for key [" + value + "]");
+ }
+ return stashedValue;
+ }
+
+ /**
+ * Recursively unstashes map values if needed
+ */
+ public Map<String, Object> unstashMap(Map<String, Object> map) {
+ Map<String, Object> copy = new HashMap<>(map);
+ unstashObject(copy);
+ return copy;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void unstashObject(Object obj) {
+ if (obj instanceof List) {
+ List list = (List) obj;
+ for (int i = 0; i < list.size(); i++) {
+ Object o = list.get(i);
+ if (isStashedValue(o)) {
+ list.set(i, unstashValue(o.toString()));
+ } else {
+ unstashObject(o);
+ }
+ }
+ }
+ if (obj instanceof Map) {
+ Map<String, Object> map = (Map) obj;
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (isStashedValue(entry.getValue())) {
+ entry.setValue(unstashValue(entry.getValue().toString()));
+ } else {
+ unstashObject(entry.getValue());
+ }
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field("stash", stash);
+ return builder;
+ }
+}
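
A brief sketch of the recursive unstash behavior (keys and values invented):

    Stash stash = new Stash();
    stash.stashValue("id", "AVoT6qvz"); // made-up id

    Map<String, Object> nested = new HashMap<>();
    nested.put("ref", "$id");

    Map<String, Object> body = new HashMap<>();
    body.put("doc_id", "$id");
    body.put("nested", nested);

    Map<String, Object> resolved = stash.unstashMap(body);
    assert "AVoT6qvz".equals(resolved.get("doc_id"));
    // note that only the top-level map is copied: nested maps and lists are unstashed in place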
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
new file mode 100644
index 0000000000..63a8b397c4
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
@@ -0,0 +1,304 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import org.apache.http.config.Registry;
+import org.apache.http.config.RegistryBuilder;
+import org.apache.http.conn.socket.ConnectionSocketFactory;
+import org.apache.http.conn.socket.PlainConnectionSocketFactory;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.SSLContexts;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.Version;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkAddress;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.spec.RestApi;
+import org.elasticsearch.test.rest.spec.RestSpec;
+
+import javax.net.ssl.SSLContext;
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.security.KeyManagementException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * REST client used to test the elasticsearch REST layer.
+ * Holds the {@link RestSpec} used to translate api calls into REST calls
+ */
+public class RestClient implements Closeable {
+
+ public static final String PROTOCOL = "protocol";
+ public static final String TRUSTSTORE_PATH = "truststore.path";
+ public static final String TRUSTSTORE_PASSWORD = "truststore.password";
+
+ private static final ESLogger logger = Loggers.getLogger(RestClient.class);
+ //query_string params that don't need to be declared in the spec, they are supported by default
+ private static final Set<String> ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path");
+
+ private final String protocol;
+ private final RestSpec restSpec;
+ private final CloseableHttpClient httpClient;
+ private final Headers headers;
+ private final InetSocketAddress[] addresses;
+ private final Version esVersion;
+
+ public RestClient(RestSpec restSpec, Settings settings, InetSocketAddress[] addresses) throws IOException, RestException {
+ assert addresses.length > 0;
+ this.restSpec = restSpec;
+ this.headers = new Headers(settings);
+ this.protocol = settings.get(PROTOCOL, "http");
+ this.httpClient = createHttpClient(settings);
+ this.addresses = addresses;
+ this.esVersion = readAndCheckVersion();
+ logger.info("REST client initialized {}, elasticsearch version: [{}]", addresses, esVersion);
+ }
+
+ private Version readAndCheckVersion() throws IOException, RestException {
+ //we make a manual call here without using the callApi method, mainly because we are initializing
+ //and the randomized context doesn't exist for the current thread (it would otherwise be used to choose the method)
+ RestApi restApi = restApi("info");
+ assert restApi.getPaths().size() == 1;
+ assert restApi.getMethods().size() == 1;
+
+ String version = null;
+ for (InetSocketAddress address : addresses) {
+ RestResponse restResponse = new RestResponse(httpRequestBuilder(address)
+ .path(restApi.getPaths().get(0))
+ .method(restApi.getMethods().get(0)).execute());
+ checkStatusCode(restResponse);
+
+ Object latestVersion = restResponse.evaluate("version.number");
+ if (latestVersion == null) {
+ throw new RuntimeException("elasticsearch version not found in the response");
+ }
+ if (version == null) {
+ version = latestVersion.toString();
+ } else {
+ if (!latestVersion.equals(version)) {
+ throw new IllegalArgumentException("provided nodes addresses run different elasticsearch versions");
+ }
+ }
+ }
+ return Version.fromString(version);
+ }
+
+ public Version getEsVersion() {
+ return esVersion;
+ }
+
+ /**
+ * Calls an api with the provided parameters and body
+ * @throws RestException if the obtained status code is not OK, unless that specific error code needs to be ignored
+ * according to the ignore parameter received as input (which won't be sent to elasticsearch)
+ */
+ public RestResponse callApi(String apiName, Map<String, String> params, String body, Map<String, String> headers) throws IOException, RestException {
+
+ List<Integer> ignores = new ArrayList<>();
+ Map<String, String> requestParams = null;
+ if (params != null) {
+ //makes a copy of the parameters before modifying them for this specific request
+ requestParams = new HashMap<>(params);
+ //ignore is a special parameter supported by the clients, shouldn't be sent to es
+ String ignoreString = requestParams.remove("ignore");
+ if (Strings.hasLength(ignoreString)) {
+ try {
+ ignores.add(Integer.valueOf(ignoreString));
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead");
+ }
+ }
+ }
+
+ HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body);
+ for (Map.Entry<String, String> header : headers.entrySet()) {
+ httpRequestBuilder.addHeader(header.getKey(), header.getValue());
+ }
+ logger.debug("calling api [{}]", apiName);
+ HttpResponse httpResponse = httpRequestBuilder.execute();
+
+ // http HEAD doesn't support a response body
+ // for the few apis (the exists class of apis) that use it we need to accept 404 too
+ if (!httpResponse.supportsBody()) {
+ ignores.add(404);
+ }
+
+ RestResponse restResponse = new RestResponse(httpResponse);
+ checkStatusCode(restResponse, ignores);
+ return restResponse;
+ }
+
+ private void checkStatusCode(RestResponse restResponse, List<Integer> ignores) throws RestException {
+ //ignore is a catch within the client, to prevent the client from throwing an error if it gets non-OK codes back
+ if (ignores.contains(restResponse.getStatusCode())) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("ignored non ok status codes {} as requested", ignores);
+ }
+ return;
+ }
+ checkStatusCode(restResponse);
+ }
+
+ private void checkStatusCode(RestResponse restResponse) throws RestException {
+ if (restResponse.isError()) {
+ throw new RestException("non ok status code [" + restResponse.getStatusCode() + "] returned", restResponse);
+ }
+ }
+
+ private HttpRequestBuilder callApiBuilder(String apiName, Map<String, String> params, String body) {
+
+ //create doesn't exist in the spec but is supported in the clients (index with op_type=create)
+ boolean indexCreateApi = "create".equals(apiName);
+ String api = indexCreateApi ? "index" : apiName;
+ RestApi restApi = restApi(api);
+
+ HttpRequestBuilder httpRequestBuilder = httpRequestBuilder();
+
+ //divide params between ones that go within query string and ones that go within path
+ Map<String, String> pathParts = new HashMap<>();
+ if (params != null) {
+ for (Map.Entry<String, String> entry : params.entrySet()) {
+ if (restApi.getPathParts().contains(entry.getKey())) {
+ pathParts.put(entry.getKey(), entry.getValue());
+ } else {
+ if (restApi.getParams().contains(entry.getKey()) || ALWAYS_ACCEPTED_QUERY_STRING_PARAMS.contains(entry.getKey())) {
+ httpRequestBuilder.addParam(entry.getKey(), entry.getValue());
+ } else {
+ throw new IllegalArgumentException("param [" + entry.getKey() + "] not supported in [" + restApi.getName() + "] api");
+ }
+ }
+ }
+ }
+
+ if (indexCreateApi) {
+ httpRequestBuilder.addParam("op_type", "create");
+ }
+
+ List<String> supportedMethods = restApi.getSupportedMethods(pathParts.keySet());
+ if (Strings.hasLength(body)) {
+ if (!restApi.isBodySupported()) {
+ throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api");
+ }
+ //test the GET with source param instead of GET/POST with body
+ if (supportedMethods.contains("GET") && RandomizedTest.rarely()) {
+ logger.debug("sending the request body as source param with GET method");
+ httpRequestBuilder.addParam("source", body).method("GET");
+ } else {
+ httpRequestBuilder.body(body).method(RandomizedTest.randomFrom(supportedMethods));
+ }
+ } else {
+ if (restApi.isBodyRequired()) {
+ throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api");
+ }
+ httpRequestBuilder.method(RandomizedTest.randomFrom(supportedMethods));
+ }
+
+ //the rest path to use is picked at random among the matching ones (if there is more than one)
+ RestPath restPath = RandomizedTest.randomFrom(restApi.getFinalPaths(pathParts));
+ return httpRequestBuilder.pathParts(restPath.getPathParts());
+ }
+
+ private RestApi restApi(String apiName) {
+ RestApi restApi = restSpec.getApi(apiName);
+ if (restApi == null) {
+ throw new IllegalArgumentException("rest api [" + apiName + "] doesn't exist in the rest spec");
+ }
+ return restApi;
+ }
+
+ protected HttpRequestBuilder httpRequestBuilder(InetSocketAddress address) {
+ return new HttpRequestBuilder(httpClient)
+ .addHeaders(headers)
+ .protocol(protocol)
+ .host(NetworkAddress.formatAddress(address.getAddress())).port(address.getPort());
+ }
+
+ protected HttpRequestBuilder httpRequestBuilder() {
+ //the address used is picked at random from the available ones
+ InetSocketAddress address = RandomizedTest.randomFrom(addresses);
+ return httpRequestBuilder(address);
+ }
+
+ protected CloseableHttpClient createHttpClient(Settings settings) throws IOException {
+ SSLConnectionSocketFactory sslsf;
+ String keystorePath = settings.get(TRUSTSTORE_PATH);
+ if (keystorePath != null) {
+ final String keystorePass = settings.get(TRUSTSTORE_PASSWORD);
+ if (keystorePass == null) {
+ throw new IllegalStateException(TRUSTSTORE_PATH + " is provided but not " + TRUSTSTORE_PASSWORD);
+ }
+ Path path = PathUtils.get(keystorePath);
+ if (!Files.exists(path)) {
+ throw new IllegalStateException(TRUSTSTORE_PATH + " is set but points to a non-existing file");
+ }
+ try {
+ KeyStore keyStore = KeyStore.getInstance("jks");
+ try (InputStream is = Files.newInputStream(path)) {
+ keyStore.load(is, keystorePass.toCharArray());
+ }
+ SSLContext sslcontext = SSLContexts.custom()
+ .loadTrustMaterial(keyStore, null)
+ .build();
+ sslsf = new SSLConnectionSocketFactory(sslcontext);
+ } catch (KeyStoreException|NoSuchAlgorithmException|KeyManagementException|CertificateException e) {
+ throw new RuntimeException(e);
+ }
+ } else {
+ sslsf = SSLConnectionSocketFactory.getSocketFactory();
+ }
+
+ Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
+ .register("http", PlainConnectionSocketFactory.getSocketFactory())
+ .register("https", sslsf)
+ .build();
+ return HttpClients.createMinimal(new PoolingHttpClientConnectionManager(socketFactoryRegistry, null, null, null, 15, TimeUnit.SECONDS));
+ }
+
+ /**
+ * Closes the REST client and the underlying http client
+ */
+ @Override
+ public void close() {
+ IOUtils.closeWhileHandlingException(httpClient);
+ }
+}
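
For example, the special ignore parameter can be exercised as follows; the client is assumed to be initialized against a test cluster and the index name is invented:

    Map<String, String> params = new HashMap<>();
    params.put("index", "test_index");
    params.put("ignore", "404"); // stripped before the call; a 404 then won't raise a RestException

    RestResponse response = restClient.callApi("indices.delete", params, null,
            Collections.<String, String>emptyMap());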
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java
new file mode 100644
index 0000000000..2236134837
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+/**
+ * Thrown when a status code that holds an error is received (unless it needs to be ignored).
+ * Holds the original {@link RestResponse}
+ */
+public class RestException extends Exception {
+
+ private final RestResponse restResponse;
+
+ public RestException(String message, RestResponse restResponse) {
+ super(message);
+ this.restResponse = restResponse;
+ }
+
+ public RestResponse restResponse() {
+ return restResponse;
+ }
+
+ public int statusCode() {
+ return restResponse.getStatusCode();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java
new file mode 100644
index 0000000000..f6e3ddabd5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.client;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class RestPath {
+ private final List<PathPart> parts;
+ private final List<String> placeholders;
+
+ public RestPath(List<String> parts) {
+ List<PathPart> pathParts = new ArrayList<>(parts.size());
+ for (String part : parts) {
+ pathParts.add(new PathPart(part, false));
+ }
+ this.parts = pathParts;
+ this.placeholders = Collections.emptyList();
+ }
+
+ public RestPath(String path) {
+ String[] pathParts = path.split("/");
+ List<String> placeholders = new ArrayList<>();
+ List<PathPart> parts = new ArrayList<>();
+ for (String pathPart : pathParts) {
+ if (pathPart.length() > 0) {
+ if (pathPart.startsWith("{")) {
+ if (pathPart.indexOf('}') != pathPart.length() - 1) {
+ throw new IllegalArgumentException("more than one parameter found in the same path part: [" + pathPart + "]");
+ }
+ String placeholder = pathPart.substring(1, pathPart.length() - 1);
+ parts.add(new PathPart(placeholder, true));
+ placeholders.add(placeholder);
+ } else {
+ parts.add(new PathPart(pathPart, false));
+ }
+ }
+ }
+ this.placeholders = placeholders;
+ this.parts = parts;
+ }
+
+ public String[] getPathParts() {
+ String[] parts = new String[this.parts.size()];
+ int i = 0;
+ for (PathPart part : this.parts) {
+ parts[i++] = part.pathPart;
+ }
+ return parts;
+ }
+
+ public boolean matches(Set<String> params) {
+ return placeholders.size() == params.size() && placeholders.containsAll(params);
+ }
+
+ public RestPath replacePlaceholders(Map<String,String> params) {
+ List<String> finalPathParts = new ArrayList<>(parts.size());
+ for (PathPart pathPart : parts) {
+ if (pathPart.isPlaceholder) {
+ String value = params.get(pathPart.pathPart);
+ if (value == null) {
+ throw new IllegalArgumentException("parameter [" + pathPart.pathPart + "] missing");
+ }
+ finalPathParts.add(value);
+ } else {
+ finalPathParts.add(pathPart.pathPart);
+ }
+ }
+ return new RestPath(finalPathParts);
+ }
+
+ private static class PathPart {
+ private final boolean isPlaceholder;
+ private final String pathPart;
+
+ private PathPart(String pathPart, boolean isPlaceholder) {
+ this.isPlaceholder = isPlaceholder;
+ this.pathPart = pathPart;
+ }
+ }
+} \ No newline at end of file
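
A brief sketch of placeholder replacement (index/type/id values invented):

    RestPath template = new RestPath("/{index}/{type}/{id}");

    Map<String, String> params = new HashMap<>();
    params.put("index", "test_index");
    params.put("type", "test_type");
    params.put("id", "1");

    assert template.matches(params.keySet()); // all placeholders are provided

    RestPath resolved = template.replacePlaceholders(params);
    // resolved.getPathParts() -> ["test_index", "test_type", "1"]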
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java
new file mode 100644
index 0000000000..cdcd6a21a5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client;
+
+import org.elasticsearch.test.rest.Stash;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.elasticsearch.test.rest.json.JsonPath;
+
+import java.io.IOException;
+
+/**
+ * Response obtained from a REST call.
+ * Supports parsing the response body as json when needed and returning specific values extracted from it.
+ */
+public class RestResponse {
+
+ private final HttpResponse response;
+ private JsonPath parsedResponse;
+
+ public RestResponse(HttpResponse response) {
+ this.response = response;
+ }
+
+ public int getStatusCode() {
+ return response.getStatusCode();
+ }
+
+ public String getReasonPhrase() {
+ return response.getReasonPhrase();
+ }
+
+ /**
+ * Returns the body properly parsed depending on the content type.
+ * Might be a string or a json object parsed as a map.
+ */
+ public Object getBody() throws IOException {
+ if (isJson()) {
+ return parsedResponse().evaluate("");
+ }
+ return response.getBody();
+ }
+
+ /**
+ * Returns the body as a string
+ */
+ public String getBodyAsString() {
+ return response.getBody();
+ }
+
+ public boolean isError() {
+ return response.isError();
+ }
+
+ /**
+ * Parses the response body as json and extracts a specific value from it (identified by the provided path)
+ */
+ public Object evaluate(String path) throws IOException {
+ return evaluate(path, Stash.EMPTY);
+ }
+
+ /**
+ * Parses the response body as json and extracts a specific value from it (identified by the provided path)
+ */
+ public Object evaluate(String path, Stash stash) throws IOException {
+
+ if (response == null) {
+ return null;
+ }
+
+ JsonPath jsonPath = parsedResponse();
+
+ if (jsonPath == null) {
+ //special case: apis that don't support a body (e.g. exists) return true if 200, false if 404, even though there is no body
+ //is_true: '' means the response had no body but the client returned true (caused by 200)
+ //is_false: '' means the response had no body but the client returned false (caused by 404)
+ if ("".equals(path) && !response.supportsBody()) {
+ return !response.isError();
+ }
+ return null;
+ }
+
+ return jsonPath.evaluate(path, stash);
+ }
+
+ private boolean isJson() {
+ String contentType = response.getHeaders().get("Content-Type");
+ return contentType != null && contentType.contains("application/json");
+ }
+
+ private JsonPath parsedResponse() throws IOException {
+ if (parsedResponse != null) {
+ return parsedResponse;
+ }
+ if (response == null || !response.hasBody()) {
+ return null;
+ }
+ return parsedResponse = new JsonPath(response.getBody());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
new file mode 100644
index 0000000000..480fc7b2f0
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows sending DELETE requests with a body (not supported out of the box)
+ */
+public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "DELETE";
+
+ public HttpDeleteWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
new file mode 100644
index 0000000000..aa0129f466
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+
+import java.net.URI;
+
+/**
+ * Allows sending GET requests with a body (not supported out of the box)
+ */
+public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
+
+ public final static String METHOD_NAME = "GET";
+
+ public HttpGetWithEntity(final URI uri) {
+ setURI(uri);
+ }
+
+ @Override
+ public String getMethod() {
+ return METHOD_NAME;
+ }
+} \ No newline at end of file
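
Both wrappers plug into the regular HttpClient execution path; a hedged sketch (endpoint and body invented, exception handling elided):

    CloseableHttpClient client = HttpClients.createDefault();
    HttpGetWithEntity get = new HttpGetWithEntity(URI.create("http://localhost:9200/_search"));
    get.setEntity(new StringEntity("{\"query\":{\"match_all\":{}}}", Charset.forName("utf-8")));
    try (CloseableHttpResponse response = client.execute(get)) {
        // a GET that carries a request body, which the stock HttpGet does not allow
    }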
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
new file mode 100644
index 0000000000..f6ce416dbf
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpOptions;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.elasticsearch.client.support.Headers;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkAddress;
+import org.elasticsearch.common.transport.InetSocketTransportAddress;
+import org.elasticsearch.http.HttpServerTransport;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLEncoder;
+import java.nio.charset.Charset;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+/**
+ * Executable builder for an http request.
+ * Holds an {@link org.apache.http.client.HttpClient} that is used to send the built http request
+ */
+public class HttpRequestBuilder {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpRequestBuilder.class);
+
+ static final Charset DEFAULT_CHARSET = Charset.forName("utf-8");
+
+ private final CloseableHttpClient httpClient;
+
+ private String protocol = "http";
+
+ private String host;
+
+ private int port;
+
+ private String path = "";
+
+ private final Map<String, String> params = new HashMap<>();
+
+ private final Map<String, String> headers = new HashMap<>();
+
+ private String method = HttpGetWithEntity.METHOD_NAME;
+
+ private String body;
+
+ public HttpRequestBuilder(CloseableHttpClient httpClient) {
+ this.httpClient = httpClient;
+ }
+
+ public HttpRequestBuilder host(String host) {
+ this.host = host;
+ return this;
+ }
+
+ public HttpRequestBuilder httpTransport(HttpServerTransport httpServerTransport) {
+ InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress();
+ return host(NetworkAddress.formatAddress(transportAddress.address().getAddress())).port(transportAddress.address().getPort());
+ }
+
+ public HttpRequestBuilder port(int port) {
+ this.port = port;
+ return this;
+ }
+
+ /**
+ * Sets the path to send the request to. URL encoding needs to be applied by the caller.
+ * Use {@link #pathParts(String...)} instead if the path needs to be encoded part by part.
+ */
+ public HttpRequestBuilder path(String path) {
+ this.path = path;
+ return this;
+ }
+
+ /**
+ * Sets the path by providing the different parts (without slashes), which will be properly encoded.
+ */
+ public HttpRequestBuilder pathParts(String... path) {
+ //encoding rules for the path and for query string parameters are different. We use URI to encode the path, and URLEncoder for each query string parameter (see addParam).
+ //We need to encode each path part separately, as each one might contain slashes that need to be escaped, which has to be done manually.
+ if (path.length == 0) {
+ this.path = "/";
+ return this;
+ }
+ StringBuilder finalPath = new StringBuilder();
+ for (String pathPart : path) {
+ try {
+ finalPath.append('/');
+ URI uri = new URI(null, null, null, -1, pathPart, null, null);
+ //manually escape any slash that each part may contain
+ finalPath.append(uri.getRawPath().replaceAll("/", "%2F"));
+ } catch(URISyntaxException e) {
+ throw new RuntimeException("unable to build uri", e);
+ }
+ }
+ this.path = finalPath.toString();
+ return this;
+ }
+
+ public HttpRequestBuilder addParam(String name, String value) {
+ try {
+ this.params.put(name, URLEncoder.encode(value, "utf-8"));
+ return this;
+ } catch (UnsupportedEncodingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public HttpRequestBuilder addHeaders(Headers headers) {
+ for (String header : headers.headers().names()) {
+ this.headers.put(header, headers.headers().get(header));
+ }
+ return this;
+ }
+
+ public HttpRequestBuilder addHeader(String name, String value) {
+ this.headers.put(name, value);
+ return this;
+ }
+
+ public HttpRequestBuilder protocol(String protocol) {
+ this.protocol = protocol;
+ return this;
+ }
+
+ public HttpRequestBuilder method(String method) {
+ this.method = method;
+ return this;
+ }
+
+ public HttpRequestBuilder body(String body) {
+ if (Strings.hasLength(body)) {
+ this.body = body;
+ }
+ return this;
+ }
+
+ public HttpResponse execute() throws IOException {
+ HttpUriRequest httpUriRequest = buildRequest();
+ if (logger.isTraceEnabled()) {
+ StringBuilder stringBuilder = new StringBuilder(httpUriRequest.getMethod()).append(" ").append(httpUriRequest.getURI());
+ if (Strings.hasLength(body)) {
+ stringBuilder.append("\n").append(body);
+ }
+ logger.trace("sending request \n{}", stringBuilder.toString());
+ }
+ for (Map.Entry<String, String> entry : this.headers.entrySet()) {
+ logger.trace("adding header [{} => {}]", entry.getKey(), entry.getValue());
+ httpUriRequest.addHeader(entry.getKey(), entry.getValue());
+ }
+ try (CloseableHttpResponse closeableHttpResponse = httpClient.execute(httpUriRequest)) {
+ HttpResponse httpResponse = new HttpResponse(httpUriRequest, closeableHttpResponse);
+ logger.trace("got response \n{}\n{}", closeableHttpResponse, httpResponse.hasBody() ? httpResponse.getBody() : "");
+ return httpResponse;
+ }
+ }
+
+ private HttpUriRequest buildRequest() {
+
+ if (HttpGetWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpGetWithEntity(buildUri()));
+ }
+
+ if (HttpHead.METHOD_NAME.equalsIgnoreCase(method)) {
+ checkBodyNotSupported();
+ return new HttpHead(buildUri());
+ }
+
+ if (HttpOptions.METHOD_NAME.equalsIgnoreCase(method)) {
+ checkBodyNotSupported();
+ return new HttpOptions(buildUri());
+ }
+
+ if (HttpDeleteWithEntity.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpDeleteWithEntity(buildUri()));
+ }
+
+ if (HttpPut.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPut(buildUri()));
+ }
+
+ if (HttpPost.METHOD_NAME.equalsIgnoreCase(method)) {
+ return addOptionalBody(new HttpPost(buildUri()));
+ }
+
+ throw new UnsupportedOperationException("method [" + method + "] not supported");
+ }
+
+ private URI buildUri() {
+ StringBuilder uriBuilder = new StringBuilder(protocol).append("://").append(host).append(":").append(port).append(path);
+ if (params.size() > 0) {
+ uriBuilder.append("?").append(params.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&")));
+ }
+ //using this constructor no url encoding happens, as everything was already encoded upfront in the addParam and pathParts methods
+ return URI.create(uriBuilder.toString());
+ }
+
+ private HttpEntityEnclosingRequestBase addOptionalBody(HttpEntityEnclosingRequestBase requestBase) {
+ if (Strings.hasText(body)) {
+ requestBase.setEntity(new StringEntity(body, DEFAULT_CHARSET));
+ }
+ return requestBase;
+ }
+
+ private void checkBodyNotSupported() {
+ if (Strings.hasText(body)) {
+ throw new IllegalArgumentException("request body not supported with " + method + " request");
+ }
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder stringBuilder = new StringBuilder(method).append(" '")
+ .append(host).append(":").append(port).append(path).append("'");
+ if (!params.isEmpty()) {
+ stringBuilder.append(", params=").append(params);
+ }
+ if (Strings.hasLength(body)) {
+ stringBuilder.append(", body=\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
new file mode 100644
index 0000000000..9945edbefa
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.client.http;
+
+import org.apache.http.Header;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.util.EntityUtils;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Response obtained from an http request
+ * Always consumes the whole response body, loading it entirely into a string
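+ * <p>
+ * For example, given some previously built httpRequestBuilder:
+ * <pre>
+ * HttpResponse response = httpRequestBuilder.execute();
+ * if (response.hasBody()) {
+ *     String body = response.getBody();
+ * }
+ * </pre>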
+ */
+public class HttpResponse {
+
+ private static final ESLogger logger = Loggers.getLogger(HttpResponse.class);
+
+ private final HttpUriRequest httpRequest;
+ private final int statusCode;
+ private final String reasonPhrase;
+ private final String body;
+ private final Map<String, String> headers = new HashMap<>();
+
+ HttpResponse(HttpUriRequest httpRequest, CloseableHttpResponse httpResponse) {
+ this.httpRequest = httpRequest;
+ this.statusCode = httpResponse.getStatusLine().getStatusCode();
+ this.reasonPhrase = httpResponse.getStatusLine().getReasonPhrase();
+ for (Header header : httpResponse.getAllHeaders()) {
+ this.headers.put(header.getName(), header.getValue());
+ }
+ if (httpResponse.getEntity() != null) {
+ try {
+ this.body = EntityUtils.toString(httpResponse.getEntity(), HttpRequestBuilder.DEFAULT_CHARSET);
+ } catch (IOException e) {
+ EntityUtils.consumeQuietly(httpResponse.getEntity());
+ throw new RuntimeException(e);
+ } finally {
+ try {
+ httpResponse.close();
+ } catch (IOException e) {
+ logger.error(e.getMessage(), e);
+ }
+ }
+ } else {
+ this.body = null;
+ }
+ }
+
+ public boolean isError() {
+ return statusCode >= 400;
+ }
+
+ public int getStatusCode() {
+ return statusCode;
+ }
+
+ public String getReasonPhrase() {
+ return reasonPhrase;
+ }
+
+ public String getBody() {
+ return body;
+ }
+
+ public boolean hasBody() {
+ return body != null;
+ }
+
+ public boolean supportsBody() {
+ return !HttpHead.METHOD_NAME.equals(httpRequest.getMethod());
+ }
+
+ public Map<String, String> getHeaders() {
+ return headers;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder stringBuilder = new StringBuilder().append(statusCode).append(" ").append(reasonPhrase);
+ if (hasBody()) {
+ stringBuilder.append("\n").append(body);
+ }
+ return stringBuilder.toString();
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java
new file mode 100644
index 0000000000..c3dbd58343
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.json;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.Stash;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Holds a json object and allows extracting specific values from it
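+ * <p>
+ * Paths are dot separated, e.g. <code>hits.hits.0._source</code> (illustrative): each part
+ * navigates either a map by key or a list by numeric index. Dots that are part of a key
+ * can be escaped with a backslash.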
+ */
+public class JsonPath {
+
+ final String json;
+ final Map<String, Object> jsonMap;
+
+ public JsonPath(String json) throws IOException {
+ this.json = json;
+ this.jsonMap = convertToMap(json);
+ }
+
+ private static Map<String, Object> convertToMap(String json) throws IOException {
+ try (XContentParser parser = JsonXContent.jsonXContent.createParser(json)) {
+ return parser.mapOrdered();
+ }
+ }
+
+ /**
+ * Returns the object corresponding to the provided path if present, null otherwise
+ */
+ public Object evaluate(String path) {
+ return evaluate(path, Stash.EMPTY);
+ }
+
+ /**
+ * Returns the object corresponding to the provided path if present, null otherwise
+ */
+ public Object evaluate(String path, Stash stash) {
+ String[] parts = parsePath(path);
+ Object object = jsonMap;
+ for (String part : parts) {
+ object = evaluate(part, object, stash);
+ if (object == null) {
+ return null;
+ }
+ }
+ return object;
+ }
+
+ @SuppressWarnings("unchecked")
+ private Object evaluate(String key, Object object, Stash stash) {
+ if (stash.isStashedValue(key)) {
+ key = stash.unstashValue(key).toString();
+ }
+
+ if (object instanceof Map) {
+ return ((Map<String, Object>) object).get(key);
+ }
+ if (object instanceof List) {
+ List<Object> list = (List<Object>) object;
+ try {
+ return list.get(Integer.valueOf(key));
+ } catch (NumberFormatException e) {
+ throw new IllegalArgumentException("element was a list, but [" + key + "] was not numeric", e);
+ } catch (IndexOutOfBoundsException e) {
+ throw new IllegalArgumentException("element was a list with " + list.size() + " elements, but [" + key + "] was out of bounds", e);
+ }
+ }
+
+ throw new IllegalArgumentException("no object found for [" + key + "] within object of class [" + object.getClass() + "]");
+ }
+
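+ /**
+ * Splits the provided path into its dot separated parts.
+ * Dots that belong to a key, rather than acting as separators, can be escaped with a backslash.
+ */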
+ private String[] parsePath(String path) {
+ List<String> list = new ArrayList<>();
+ StringBuilder current = new StringBuilder();
+ boolean escape = false;
+ for (int i = 0; i < path.length(); i++) {
+ char c = path.charAt(i);
+ if (c == '\\') {
+ escape = true;
+ continue;
+ }
+
+ if (c == '.') {
+ if (escape) {
+ escape = false;
+ } else {
+ if (current.length() > 0) {
+ list.add(current.toString());
+ current.setLength(0);
+ }
+ continue;
+ }
+ }
+
+ //reset the escape flag: a backslash only escapes the character that immediately follows it
+ escape = false;
+ current.append(c);
+ }
+
+ if (current.length() > 0) {
+ list.add(current.toString());
+ }
+
+ return list.toArray(new String[list.size()]);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
new file mode 100644
index 0000000000..2a20e0f314
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.rest.section.ApiCallSection;
+import org.elasticsearch.test.rest.section.DoSection;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Parser for do sections
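+ * <p>
+ * A do section holds a single api call, with optional catch and headers, e.g.
+ * (api, params and header values below are purely illustrative):
+ * <pre>
+ *   - do:
+ *       catch: missing
+ *       headers:
+ *           Authorization: "Basic dXNlcjpwYXNzd29yZA=="
+ *       get:
+ *           index: test_index
+ *           id: 1
+ * </pre>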
+ */
+public class DoSectionParser implements RestTestFragmentParser<DoSection> {
+
+ @Override
+ public DoSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ DoSection doSection = new DoSection();
+ ApiCallSection apiCallSection = null;
+ Map<String, String> headers = new HashMap<>();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("catch".equals(currentFieldName)) {
+ doSection.setCatch(parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("headers".equals(currentFieldName)) {
+ String headerName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ headerName = parser.currentName();
+ } else if (token.isValue()) {
+ headers.put(headerName, parser.text());
+ }
+ }
+ } else if (currentFieldName != null) { // must be part of API call then
+ apiCallSection = new ApiCallSection(currentFieldName);
+ String paramName = null;
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ paramName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("body".equals(paramName)) {
+ String body = parser.text();
+ XContentType bodyContentType = XContentFactory.xContentType(body);
+ try (XContentParser bodyParser = XContentFactory.xContent(bodyContentType).createParser(body)) {
+ //multiple bodies are supported, e.g. in case a bulk request is provided as a whole string
+ while (bodyParser.nextToken() != null) {
+ apiCallSection.addBody(bodyParser.mapOrdered());
+ }
+ }
+ } else {
+ apiCallSection.addParam(paramName, parser.text());
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+ if ("body".equals(paramName)) {
+ apiCallSection.addBody(parser.mapOrdered());
+ }
+ }
+ }
+ }
+ }
+ }
+ try {
+ if (apiCallSection == null) {
+ throw new RestTestParseException("client call section is mandatory within a do section");
+ }
+ if (headers.isEmpty() == false) {
+ apiCallSection.addHeaders(headers);
+ }
+ doSection.setApiCallSection(apiCallSection);
+ } finally {
+ parser.nextToken();
+ }
+ return doSection;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
new file mode 100644
index 0000000000..68f833d35c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.GreaterThanEqualToAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for gte assert sections
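+ * <p>
+ * For example (field name and value are illustrative):
+ * <pre>
+ *   - gte: { hits.total: 10 }
+ * </pre>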
+ */
+public class GreaterThanEqualToParser implements RestTestFragmentParser<GreaterThanEqualToAssertion> {
+
+ @Override
+ public GreaterThanEqualToAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (!(stringObjectTuple.v2() instanceof Comparable)) {
+ throw new RestTestParseException("gte section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new GreaterThanEqualToAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
new file mode 100644
index 0000000000..a66122138c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.GreaterThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for gt assert sections
+ */
+public class GreaterThanParser implements RestTestFragmentParser<GreaterThanAssertion> {
+
+ @Override
+ public GreaterThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (!(stringObjectTuple.v2() instanceof Comparable)) {
+ throw new RestTestParseException("gt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new GreaterThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
new file mode 100644
index 0000000000..81cade6d84
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsFalseAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_false assert sections
+ */
+public class IsFalseParser implements RestTestFragmentParser<IsFalseAssertion> {
+
+ @Override
+ public IsFalseAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsFalseAssertion(parseContext.parseField());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
new file mode 100644
index 0000000000..922629b47e
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.test.rest.section.IsTrueAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for is_true assert sections
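+ * <p>
+ * For example (the field name is illustrative):
+ * <pre>
+ *   - is_true: fields._source
+ * </pre>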
+ */
+public class IsTrueParser implements RestTestFragmentParser<IsTrueAssertion> {
+
+ @Override
+ public IsTrueAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ return new IsTrueAssertion(parseContext.parseField());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java
new file mode 100644
index 0000000000..414be59f4c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LengthAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for length assert sections
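+ * <p>
+ * For example (field name and length are illustrative):
+ * <pre>
+ *   - length: { hits.hits: 3 }
+ * </pre>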
+ */
+public class LengthParser implements RestTestFragmentParser<LengthAssertion> {
+
+ @Override
+ public LengthAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ assert stringObjectTuple.v2() != null;
+ int value;
+ if (stringObjectTuple.v2() instanceof Number) {
+ value = ((Number) stringObjectTuple.v2()).intValue();
+ } else {
+ try {
+ value = Integer.valueOf(stringObjectTuple.v2().toString());
+ } catch(NumberFormatException e) {
+ throw new RestTestParseException("length is not a valid number", e);
+ }
+ }
+ return new LengthAssertion(stringObjectTuple.v1(), value);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
new file mode 100644
index 0000000000..f2d53d05a5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LessThanOrEqualToAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for lte assert section
+ */
+public class LessThanOrEqualToParser implements RestTestFragmentParser<LessThanOrEqualToAssertion> {
+
+ @Override
+ public LessThanOrEqualToAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (!(stringObjectTuple.v2() instanceof Comparable)) {
+ throw new RestTestParseException("lte section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new LessThanOrEqualToAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java
new file mode 100644
index 0000000000..065dd19d6a
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.LessThanAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for lt assert sections
+ */
+public class LessThanParser implements RestTestFragmentParser<LessThanAssertion> {
+
+ @Override
+ public LessThanAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ if (!(stringObjectTuple.v2() instanceof Comparable)) {
+ throw new RestTestParseException("lt section can only be used with objects that support natural ordering, found " + stringObjectTuple.v2().getClass().getSimpleName());
+ }
+ return new LessThanAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java
new file mode 100644
index 0000000000..30ee18a4e0
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.test.rest.section.MatchAssertion;
+
+import java.io.IOException;
+
+/**
+ * Parser for match assert sections
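+ * <p>
+ * For example (field name and expected value are illustrative):
+ * <pre>
+ *   - match: { _id: "1" }
+ * </pre>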
+ */
+public class MatchParser implements RestTestFragmentParser<MatchAssertion> {
+
+ @Override
+ public MatchAssertion parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ Tuple<String,Object> stringObjectTuple = parseContext.parseTuple();
+ return new MatchAssertion(stringObjectTuple.v1(), stringObjectTuple.v2());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
new file mode 100644
index 0000000000..8d2bd8be76
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import java.io.IOException;
+
+/**
+ * Base parser for a REST test suite fragment
+ * @param <T> the type of test fragment that gets parsed and returned
+ */
+public interface RestTestFragmentParser<T> {
+
+ /**
+ * Parses a test fragment given the current {@link RestTestSuiteParseContext}
+ */
+ T parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException;
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
new file mode 100644
index 0000000000..3e1af2cd74
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+/**
+ * Exception thrown whenever there is a problem parsing any of the REST test suite fragments
+ */
+public class RestTestParseException extends Exception {
+
+ RestTestParseException(String message) {
+ super(message);
+ }
+
+ RestTestParseException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
new file mode 100644
index 0000000000..0763615111
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for a complete test section
+ */
+public class RestTestSectionParser implements RestTestFragmentParser<TestSection> {
+
+ @Override
+ public TestSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+ parseContext.advanceToFieldName();
+ TestSection testSection = new TestSection(parser.currentName());
+ parser.nextToken();
+ testSection.setSkipSection(parseContext.parseSkipSection());
+
+ while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ parseContext.advanceToFieldName();
+ testSection.addExecutableSection(parseContext.parseExecutableSection());
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ return testSection;
+ }
+
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
new file mode 100644
index 0000000000..036310e4e4
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.DoSection;
+import org.elasticsearch.test.rest.section.ExecutableSection;
+import org.elasticsearch.test.rest.section.SetupSection;
+import org.elasticsearch.test.rest.section.SkipSection;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Context shared across the whole parse phase of a test suite.
+ * Provides shared parse methods and holds information needed to parse the test sections (e.g. es version)
+ */
+public class RestTestSuiteParseContext {
+
+ private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser();
+ private static final RestTestSectionParser TEST_SECTION_PARSER = new RestTestSectionParser();
+ private static final SkipSectionParser SKIP_SECTION_PARSER = new SkipSectionParser();
+ private static final DoSectionParser DO_SECTION_PARSER = new DoSectionParser();
+ private static final Map<String, RestTestFragmentParser<? extends ExecutableSection>> EXECUTABLE_SECTIONS_PARSERS = new HashMap<>();
+ static {
+ EXECUTABLE_SECTIONS_PARSERS.put("do", DO_SECTION_PARSER);
+ EXECUTABLE_SECTIONS_PARSERS.put("set", new SetSectionParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("match", new MatchParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_true", new IsTrueParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("is_false", new IsFalseParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("gt", new GreaterThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("gte", new GreaterThanEqualToParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("lt", new LessThanParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("lte", new LessThanOrEqualToParser());
+ EXECUTABLE_SECTIONS_PARSERS.put("length", new LengthParser());
+ }
+
+ private final String api;
+ private final String suiteName;
+ private final XContentParser parser;
+
+ public RestTestSuiteParseContext(String api, String suiteName, XContentParser parser) {
+ this.api = api;
+ this.suiteName = suiteName;
+ this.parser = parser;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getSuiteName() {
+ return suiteName;
+ }
+
+ public XContentParser parser() {
+ return parser;
+ }
+
+ public SetupSection parseSetupSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("setup".equals(parser.currentName())) {
+ parser.nextToken();
+ SetupSection setupSection = SETUP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return setupSection;
+ }
+
+ return SetupSection.EMPTY;
+ }
+
+ public TestSection parseTestSection() throws IOException, RestTestParseException {
+ return TEST_SECTION_PARSER.parse(this);
+ }
+
+ public SkipSection parseSkipSection() throws IOException, RestTestParseException {
+
+ advanceToFieldName();
+
+ if ("skip".equals(parser.currentName())) {
+ SkipSection skipSection = SKIP_SECTION_PARSER.parse(this);
+ parser.nextToken();
+ return skipSection;
+ }
+
+ return SkipSection.EMPTY;
+ }
+
+ public ExecutableSection parseExecutableSection() throws IOException, RestTestParseException {
+ advanceToFieldName();
+ String section = parser.currentName();
+ RestTestFragmentParser<? extends ExecutableSection> execSectionParser = EXECUTABLE_SECTIONS_PARSERS.get(section);
+ if (execSectionParser == null) {
+ throw new RestTestParseException("no parser found for executable section [" + section + "]");
+ }
+ ExecutableSection executableSection = execSectionParser.parse(this);
+ parser.nextToken();
+ return executableSection;
+ }
+
+ public DoSection parseDoSection() throws IOException, RestTestParseException {
+ return DO_SECTION_PARSER.parse(this);
+ }
+
+ public void advanceToFieldName() throws IOException, RestTestParseException {
+ XContentParser.Token token = parser.currentToken();
+ //we are at the very beginning, nextToken hasn't been called yet
+ if (token == null) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_ARRAY) {
+ token = parser.nextToken();
+ }
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
+ }
+ if (token != XContentParser.Token.FIELD_NAME) {
+ throw new RestTestParseException("malformed test section: field name expected but found " + token);
+ }
+ }
+
+ public String parseField() throws IOException, RestTestParseException {
+ parser.nextToken();
+ assert parser.currentToken().isValue();
+ String field = parser.text();
+ parser.nextToken();
+ return field;
+ }
+
+ public Tuple<String, Object> parseTuple() throws IOException, RestTestParseException {
+ parser.nextToken();
+ advanceToFieldName();
+ Map<String,Object> map = parser.map();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT;
+ parser.nextToken();
+
+ if (map.size() != 1) {
+ throw new RestTestParseException("expected key value pair but found " + map.size() + " ");
+ }
+
+ Map.Entry<String, Object> entry = map.entrySet().iterator().next();
+ return Tuple.tuple(entry.getKey(), entry.getValue());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
new file mode 100644
index 0000000000..e8422887ad
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.yaml.YamlXContent;
+import org.elasticsearch.test.rest.section.RestTestSuite;
+import org.elasticsearch.test.rest.section.TestSection;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+/**
+ * Parser for a complete test suite (yaml file)
+ */
+public class RestTestSuiteParser implements RestTestFragmentParser<RestTestSuite> {
+
+ public RestTestSuite parse(String api, Path file) throws IOException, RestTestParseException {
+
+ if (!Files.isRegularFile(file)) {
+ throw new IllegalArgumentException(file.toAbsolutePath() + " is not a file");
+ }
+
+ String filename = file.getFileName().toString();
+ //remove the file extension
+ int i = filename.lastIndexOf('.');
+ if (i > 0) {
+ filename = filename.substring(0, i);
+ }
+
+ //our yaml parser seems to be too tolerant. Each yaml suite must end with \n, otherwise client tests might break.
+ try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
+ ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+ channel.read(bb, channel.size() - 1);
+ if (bb.get(0) != 10) {
+ throw new RestTestParseException("test suite [" + api + "/" + filename + "] doesn't end with line feed (\\n)");
+ }
+ }
+
+ XContentParser parser = YamlXContent.yamlXContent.createParser(Files.newInputStream(file));
+ try {
+ RestTestSuiteParseContext testParseContext = new RestTestSuiteParseContext(api, filename, parser);
+ return parse(testParseContext);
+ } catch(Exception e) {
+ throw new RestTestParseException("Error parsing " + api + "/" + filename, e);
+ } finally {
+ parser.close();
+ }
+ }
+
+ @Override
+ public RestTestSuite parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+ XContentParser parser = parseContext.parser();
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.START_OBJECT;
+
+ RestTestSuite restTestSuite = new RestTestSuite(parseContext.getApi(), parseContext.getSuiteName());
+
+ restTestSuite.setSetupSection(parseContext.parseSetupSection());
+
+ while(true) {
+ //the "---" section separator is not understood by the yaml parser. null is returned, same as when the parser is closed
+ //we need to somehow distinguish between a null in the middle of a test ("---")
+ // and a null at the end of the file (at least two consecutive null tokens)
+ if(parser.currentToken() == null) {
+ if (parser.nextToken() == null) {
+ break;
+ }
+ }
+
+ TestSection testSection = parseContext.parseTestSection();
+ if (!restTestSuite.addTestSection(testSection)) {
+ throw new RestTestParseException("duplicate test section [" + testSection.getName() + "] found in [" + restTestSuite.getPath() + "]");
+ }
+ }
+
+ return restTestSuite;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
new file mode 100644
index 0000000000..8afafc09f7
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for set sections
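+ * <p>
+ * A set section stashes a value from the last response so it can be reused later, e.g.
+ * (field and stash key are illustrative):
+ * <pre>
+ *   - set: { _scroll_id: scroll_id }
+ * </pre>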
+ */
+public class SetSectionParser implements RestTestFragmentParser<SetSection> {
+
+ @Override
+ public SetSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+
+ SetSection setSection = new SetSection();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ setSection.addSet(currentFieldName, parser.text());
+ }
+ }
+
+ parser.nextToken();
+
+ if (setSection.getStash().isEmpty()) {
+ throw new RestTestParseException("set section must set at least a value");
+ }
+
+ return setSection;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
new file mode 100644
index 0000000000..2a2e39ea74
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SetupSection;
+
+import java.io.IOException;
+
+/**
+ * Parser for setup sections
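+ * <p>
+ * A setup section holds a list of do sections, optionally preceded by a skip section, e.g.
+ * (the api call below is illustrative):
+ * <pre>
+ *   setup:
+ *     - do:
+ *         indices.create:
+ *             index: test_index
+ * </pre>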
+ */
+public class SetupSectionParser implements RestTestFragmentParser<SetupSection> {
+
+ @Override
+ public SetupSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ SetupSection setupSection = new SetupSection();
+ setupSection.setSkipSection(parseContext.parseSkipSection());
+
+ while (parser.currentToken() != XContentParser.Token.END_ARRAY) {
+ parseContext.advanceToFieldName();
+ if (!"do".equals(parser.currentName())) {
+ throw new RestTestParseException("section [" + parser.currentName() + "] not supported within setup section");
+ }
+
+ parser.nextToken();
+ setupSection.addDoSection(parseContext.parseDoSection());
+ parser.nextToken();
+ }
+
+ parser.nextToken();
+
+ return setupSection;
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
new file mode 100644
index 0000000000..3373382101
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.parser;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.rest.section.SkipSection;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Parser for skip sections
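+ * <p>
+ * A skip section is either version based or features based, e.g.
+ * (version range, reason and feature name are illustrative):
+ * <pre>
+ *   skip:
+ *       version: "1.0.0 - 1.2.0"
+ *       reason: feature was added in 1.3.0
+ * </pre>
+ * or:
+ * <pre>
+ *   skip:
+ *       features: regex
+ * </pre>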
+ */
+public class SkipSectionParser implements RestTestFragmentParser<SkipSection> {
+
+ @Override
+ public SkipSection parse(RestTestSuiteParseContext parseContext) throws IOException, RestTestParseException {
+
+ XContentParser parser = parseContext.parser();
+
+ String currentFieldName = null;
+ XContentParser.Token token;
+ String version = null;
+ String reason = null;
+ List<String> features = new ArrayList<>();
+
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if ("version".equals(currentFieldName)) {
+ version = parser.text();
+ } else if ("reason".equals(currentFieldName)) {
+ reason = parser.text();
+ } else if ("features".equals(currentFieldName)) {
+ features.add(parser.text());
+ } else {
+ throw new RestTestParseException("field " + currentFieldName + " not supported within skip section");
+ }
+ } else if (token == XContentParser.Token.START_ARRAY) {
+ if ("features".equals(currentFieldName)) {
+ while(parser.nextToken() != XContentParser.Token.END_ARRAY) {
+ features.add(parser.text());
+ }
+ }
+ }
+ }
+
+ parser.nextToken();
+
+ if (!Strings.hasLength(version) && features.isEmpty()) {
+ throw new RestTestParseException("version or features is mandatory within skip section");
+ }
+ if (Strings.hasLength(version) && !features.isEmpty()) {
+ throw new RestTestParseException("version or features are mutually exclusive");
+ }
+ if (Strings.hasLength(version) && !Strings.hasLength(reason)) {
+ throw new RestTestParseException("reason is mandatory within skip version section");
+ }
+
+ return new SkipSection(version, features, reason);
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java
new file mode 100644
index 0000000000..030469148e
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.unmodifiableMap;
+
+/**
+ * Represents a test fragment that contains the information needed to call an api
+ */
+public class ApiCallSection {
+
+ private final String api;
+ private final Map<String, String> params = new HashMap<>();
+ private final Map<String, String> headers = new HashMap<>();
+ private final List<Map<String, Object>> bodies = new ArrayList<>();
+
+ public ApiCallSection(String api) {
+ this.api = api;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public Map<String, String> getParams() {
+ //make sure we never modify the parameters once returned
+ return unmodifiableMap(params);
+ }
+
+ public void addParam(String key, String value) {
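+ //parameters added more than once are merged into a single comma separated value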
+ String existingValue = params.get(key);
+ if (existingValue != null) {
+ value = existingValue + "," + value;
+ }
+ this.params.put(key, value);
+ }
+
+ public void addHeaders(Map<String, String> otherHeaders) {
+ this.headers.putAll(otherHeaders);
+ }
+
+ public void addHeader(String key, String value) {
+ this.headers.put(key, value);
+ }
+
+ public Map<String, String> getHeaders() {
+ return unmodifiableMap(headers);
+ }
+
+ public List<Map<String, Object>> getBodies() {
+ return Collections.unmodifiableList(bodies);
+ }
+
+ public void addBody(Map<String, Object> body) {
+ this.bodies.add(body);
+ }
+
+ public boolean hasBody() {
+ return bodies.isEmpty() == false;
+ }
+}
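A minimal usage sketch, not part of the patch, showing how a runner might assemble one of these sections by hand; the api name and values are illustrative, and normally the YAML parser does this wiring:

    import java.util.Collections;

    import org.elasticsearch.test.rest.section.ApiCallSection;

    public class ApiCallSectionExample {
        public static void main(String[] args) {
            ApiCallSection call = new ApiCallSection("search");
            call.addParam("index", "test_1");
            call.addParam("index", "test_2"); // repeated keys are merged into "test_1,test_2"
            call.addHeader("Authorization", "Basic dXNlcjpwYXNz");
            call.addBody(Collections.singletonMap("query",
                    Collections.singletonMap("match_all", Collections.emptyMap())));

            assert "test_1,test_2".equals(call.getParams().get("index"));
            assert call.hasBody(); // run with -ea to enable the asserts
        }
    }
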
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java
new file mode 100644
index 0000000000..c420309f20
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * Base class for executable sections that hold assertions
+ */
+public abstract class Assertion implements ExecutableSection {
+
+ private final String field;
+ private final Object expectedValue;
+
+ protected Assertion(String field, Object expectedValue) {
+ this.field = field;
+ this.expectedValue = expectedValue;
+ }
+
+ public final String getField() {
+ return field;
+ }
+
+ public final Object getExpectedValue() {
+ return expectedValue;
+ }
+
+ protected final Object resolveExpectedValue(RestTestExecutionContext executionContext) throws IOException {
+ if (expectedValue instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> map = (Map<String, Object>) expectedValue;
+ return executionContext.stash().unstashMap(map);
+ }
+
+ if (executionContext.stash().isStashedValue(expectedValue)) {
+ return executionContext.stash().unstashValue(expectedValue.toString());
+ }
+ return expectedValue;
+ }
+
+ protected final Object getActualValue(RestTestExecutionContext executionContext) throws IOException {
+ if (executionContext.stash().isStashedValue(field)) {
+ return executionContext.stash().unstashValue(field);
+ }
+ return executionContext.response(field);
+ }
+
+ @Override
+ public final void execute(RestTestExecutionContext executionContext) throws IOException {
+ doAssert(getActualValue(executionContext), resolveExpectedValue(executionContext));
+ }
+
+ /**
+ * Executes the assertion comparing the actual value (parsed from the response) with the expected one
+ */
+ protected abstract void doAssert(Object actualValue, Object expectedValue);
+
+ /**
+ * a utility to get the class of an object, protecting for null (i.e., returning null if the input is null)
+ */
+ protected Class<?> safeClass(Object o) {
+ return o == null ? null : o.getClass();
+ }
+}
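For illustration, a hypothetical minimal subclass (not in this patch) showing the template-method contract: execute() resolves stash references first, so doAssert() only ever sees final values:

    package org.elasticsearch.test.rest.section;

    import static org.hamcrest.Matchers.equalTo;
    import static org.junit.Assert.assertThat;

    // hypothetical example, not part of the patch
    public class EqualsAssertionExample extends Assertion {

        public EqualsAssertionExample(String field, Object expectedValue) {
            super(field, expectedValue);
        }

        @Override
        protected void doAssert(Object actualValue, Object expectedValue) {
            // both arguments arrive already unstashed, courtesy of execute()
            assertThat("field [" + getField() + "] has an unexpected value",
                    actualValue, equalTo(expectedValue));
        }
    }
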
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java
new file mode 100644
index 0000000000..38504c4af5
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+import org.elasticsearch.test.rest.client.RestException;
+import org.elasticsearch.test.rest.client.RestResponse;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.common.collect.Tuple.tuple;
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a do section:
+ *
+ * - do:
+ * catch: missing
+ * headers:
+ * Authorization: Basic user:pass
+ * Content-Type: application/json
+ * update:
+ * index: test_1
+ * type: test
+ * id: 1
+ * body: { doc: { foo: bar } }
+ *
+ */
+public class DoSection implements ExecutableSection {
+
+ private static final ESLogger logger = Loggers.getLogger(DoSection.class);
+
+ private String catchParam;
+ private ApiCallSection apiCallSection;
+
+ public String getCatch() {
+ return catchParam;
+ }
+
+ public void setCatch(String catchParam) {
+ this.catchParam = catchParam;
+ }
+
+ public ApiCallSection getApiCallSection() {
+ return apiCallSection;
+ }
+
+ public void setApiCallSection(ApiCallSection apiCallSection) {
+ this.apiCallSection = apiCallSection;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+
+ if ("param".equals(catchParam)) {
+ //client should throw validation error before sending request
+ //lets just return without doing anything as we don't have any client to test here
+ logger.info("found [catch: param], no request sent");
+ return;
+ }
+
+ try {
+ RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(),
+ apiCallSection.getBodies(), apiCallSection.getHeaders());
+ if (Strings.hasLength(catchParam)) {
+ String catchStatusCode;
+ if (catches.containsKey(catchParam)) {
+ catchStatusCode = catches.get(catchParam).v1();
+ } else if (catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ catchStatusCode = "4xx|5xx";
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ fail(formatStatusCodeMessage(restResponse, catchStatusCode));
+ }
+ } catch(RestException e) {
+ if (!Strings.hasLength(catchParam)) {
+ fail(formatStatusCodeMessage(e.restResponse(), "2xx"));
+ } else if (catches.containsKey(catchParam)) {
+ assertStatusCode(e.restResponse());
+ } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) {
+ //the text of the error message matches the regular expression
+ assertThat(formatStatusCodeMessage(e.restResponse(), "4xx|5xx"), e.statusCode(), greaterThanOrEqualTo(400));
+ Object error = executionContext.response("error");
+ assertThat("error was expected in the response", error, notNullValue());
+ //remove delimiters from regex
+ String regex = catchParam.substring(1, catchParam.length() - 1);
+ assertThat("the error message was expected to match the provided regex but didn't",
+ error.toString(), matches(regex));
+ } else {
+ throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported");
+ }
+ }
+ }
+
+ private void assertStatusCode(RestResponse restResponse) {
+ Tuple<String, org.hamcrest.Matcher<Integer>> stringMatcherTuple = catches.get(catchParam);
+ assertThat(formatStatusCodeMessage(restResponse, stringMatcherTuple.v1()),
+ restResponse.getStatusCode(), stringMatcherTuple.v2());
+ }
+
+ private String formatStatusCodeMessage(RestResponse restResponse, String expected) {
+ return "expected [" + expected + "] status code but api [" + apiCallSection.getApi() + "] returned ["
+ + restResponse.getStatusCode() + " " + restResponse.getReasonPhrase() + "] [" + restResponse.getBodyAsString() + "]";
+ }
+
+ private static Map<String, Tuple<String, org.hamcrest.Matcher<Integer>>> catches = new HashMap<>();
+
+ static {
+ catches.put("missing", tuple("404", equalTo(404)));
+ catches.put("conflict", tuple("409", equalTo(409)));
+ catches.put("forbidden", tuple("403", equalTo(403)));
+ catches.put("request_timeout", tuple("408", equalTo(408)));
+ catches.put("request", tuple("4xx|5xx", allOf(greaterThanOrEqualTo(400), not(equalTo(404)), not(equalTo(408)), not(equalTo(409)), not(equalTo(403)))));
+ }
+}
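A sketch, not part of the patch, of how the YAML fragment in the class javadoc maps onto these objects once parsed (the do-section parser normally performs this wiring):

    package org.elasticsearch.test.rest.section;

    import java.util.Collections;

    public class DoSectionExample {
        public static void main(String[] args) {
            ApiCallSection call = new ApiCallSection("update");
            call.addParam("index", "test_1");
            call.addParam("type", "test");
            call.addParam("id", "1");
            call.addHeader("Authorization", "Basic dXNlcjpwYXNz");
            call.addBody(Collections.singletonMap("doc",
                    Collections.singletonMap("foo", "bar")));

            DoSection doSection = new DoSection();
            doSection.setCatch("missing"); // expect a 404 instead of a 2xx response
            doSection.setApiCallSection(call);
            // doSection.execute(executionContext) would now fail the test
            // unless the update api actually returns a 404
        }
    }
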
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java
new file mode 100644
index 0000000000..669d82cdd7
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+
+/**
+ * Represents a test fragment that can be executed (e.g. api call, assertion)
+ */
+public interface ExecutableSection {
+
+ /**
+ * Executes the section passing in the execution context
+ */
+ void execute(RestTestExecutionContext executionContext) throws IOException;
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
new file mode 100644
index 0000000000..ade7fbd59c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a gt assert section:
+ * <p>
+ * - gt: { fields._ttl: 0}
+ */
+public class GreaterThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(GreaterThanAssertion.class);
+
+ public GreaterThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is greater than [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, greaterThan((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not greater than [" + getExpectedValue() + "]";
+ }
+}
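An illustrative same-package probe, not part of the patch (doAssert is protected). The second call assumes Hamcrest's ordering matcher throws ClassCastException for mixed Comparable types, which the catch block above converts into an ordinary test failure:

    package org.elasticsearch.test.rest.section;

    public class GreaterThanAssertionExample {
        public static void main(String[] args) {
            GreaterThanAssertion gt = new GreaterThanAssertion("fields._ttl", 0);
            gt.doAssert(100, 0); // passes: both are Integers and 100 > 0
            try {
                gt.doAssert(100L, 0); // Long vs Integer: compareTo throws ClassCastException
            } catch (AssertionError expected) {
                // reported as a plain assertion failure, not as a raw cast error
            }
        }
    }
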
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
new file mode 100644
index 0000000000..cfdca7bc33
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a gte assert section:
+ *
+ * - gte: { fields._ttl: 0 }
+ */
+public class GreaterThanEqualToAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(GreaterThanEqualToAssertion.class);
+
+ public GreaterThanEqualToAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is greater than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, greaterThanOrEqualTo((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not greater than or equal to [" + getExpectedValue() + "]";
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
new file mode 100644
index 0000000000..9f3a8b6df9
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.anyOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.equalToIgnoringCase;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_false assert section:
+ *
+ * - is_false: get.fields.bar
+ *
+ */
+public class IsFalseAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsFalseAssertion.class);
+
+ public IsFalseAssertion(String field) {
+ super(field, false);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] doesn't have a true value (field: [{}])", actualValue, getField());
+
+ if (actualValue == null) {
+ return;
+ }
+
+ String actualString = actualValue.toString();
+ assertThat(errorMessage(), actualString, anyOf(
+ equalTo(""),
+ equalToIgnoringCase(Boolean.FALSE.toString()),
+ equalTo("0")
+ ));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] has a true value but it shouldn't";
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
new file mode 100644
index 0000000000..aacb5f0a3b
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.equalToIgnoringCase;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents an is_true assert section:
+ *
+ * - is_true: get.fields.bar
+ *
+ */
+public class IsTrueAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(IsTrueAssertion.class);
+
+ public IsTrueAssertion(String field) {
+ super(field, true);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has a true value (field [{}])", actualValue, getField());
+ String errorMessage = errorMessage();
+ assertThat(errorMessage, actualValue, notNullValue());
+ String actualString = actualValue.toString();
+ assertThat(errorMessage, actualString, not(equalTo("")));
+ assertThat(errorMessage, actualString, not(equalToIgnoringCase(Boolean.FALSE.toString())));
+ assertThat(errorMessage, actualString, not(equalTo("0")));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have a true value";
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java
new file mode 100644
index 0000000000..265487a038
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a length assert section:
+ * <p>
+ * - length: { hits.hits: 1 }
+ */
+public class LengthAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LengthAssertion.class);
+
+ public LengthAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] has length [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("expected value of [" + getField() + "] is not numeric (got [" + expectedValue.getClass() + "]", expectedValue, instanceOf(Number.class));
+ int length = ((Number) expectedValue).intValue();
+ if (actualValue instanceof String) {
+ assertThat(errorMessage(), ((String) actualValue).length(), equalTo(length));
+ } else if (actualValue instanceof List) {
+ assertThat(errorMessage(), ((List) actualValue).size(), equalTo(length));
+ } else if (actualValue instanceof Map) {
+ assertThat(errorMessage(), ((Map) actualValue).keySet().size(), equalTo(length));
+ } else {
+ throw new UnsupportedOperationException("value is of unsupported type [" + safeClass(actualValue) + "]");
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't have length [" + getExpectedValue() + "]";
+ }
+}
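A same-package sketch, not part of the patch, of the three value shapes the assertion understands:

    package org.elasticsearch.test.rest.section;

    import java.util.Arrays;
    import java.util.Collections;

    public class LengthAssertionExample {
        public static void main(String[] args) {
            new LengthAssertion("hits.hits", 2).doAssert(Arrays.asList("a", "b"), 2); // list size
            new LengthAssertion("_id", 1).doAssert("1", 1);                           // string length
            new LengthAssertion("_source", 1)
                    .doAssert(Collections.singletonMap("foo", "bar"), 1);             // map size
        }
    }
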
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
new file mode 100644
index 0000000000..89387ff895
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a lt assert section:
+ *
+ * - lt: { fields._ttl: 20000}
+ *
+ */
+public class LessThanAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LessThanAssertion.class);
+
+ public LessThanAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is less than [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, lessThan((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not less than [" + getExpectedValue() + "]";
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
new file mode 100644
index 0000000000..99cbf1155d
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+/**
+ * Represents a lte assert section:
+ *
+ * - lte: { fields._ttl: 0 }
+ */
+public class LessThanOrEqualToAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(LessThanOrEqualToAssertion.class);
+
+ public LessThanOrEqualToAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+ logger.trace("assert that [{}] is less than or equal to [{}] (field: [{}])", actualValue, expectedValue, getField());
+ assertThat("value of [" + getField() + "] is not comparable (got [" + safeClass(actualValue) + "])", actualValue, instanceOf(Comparable.class));
+ assertThat("expected value of [" + getField() + "] is not comparable (got [" + expectedValue.getClass() + "])", expectedValue, instanceOf(Comparable.class));
+ try {
+ assertThat(errorMessage(), (Comparable) actualValue, lessThanOrEqualTo((Comparable) expectedValue));
+ } catch (ClassCastException e) {
+ fail("cast error while checking (" + errorMessage() + "): " + e);
+ }
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] is not less than or equal to [" + getExpectedValue() + "]";
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java
new file mode 100644
index 0000000000..16efcae96c
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+
+import java.util.regex.Pattern;
+
+import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.junit.Assert.assertThat;
+
+/**
+ * Represents a match assert section:
+ *
+ * - match: { get.fields._routing: "5" }
+ *
+ */
+public class MatchAssertion extends Assertion {
+
+ private static final ESLogger logger = Loggers.getLogger(MatchAssertion.class);
+
+ public MatchAssertion(String field, Object expectedValue) {
+ super(field, expectedValue);
+ }
+
+ @Override
+ protected void doAssert(Object actualValue, Object expectedValue) {
+
+ //if the value is wrapped in / it is a regexp (e.g. /\s+\d+/)
+ if (expectedValue instanceof String) {
+ String expValue = ((String) expectedValue).trim();
+ if (expValue.length() > 2 && expValue.startsWith("/") && expValue.endsWith("/")) {
+ assertThat("field [" + getField() + "] was expected to be of type String but is an instance of [" + safeClass(actualValue) + "]", actualValue, instanceOf(String.class));
+ String stringValue = (String) actualValue;
+ String regex = expValue.substring(1, expValue.length() - 1);
+ logger.trace("assert that [{}] matches [{}]", stringValue, regex);
+ assertThat("field [" + getField() + "] was expected to match the provided regex but didn't",
+ stringValue, matches(regex, Pattern.COMMENTS));
+ return;
+ }
+ }
+
+ assertThat(errorMessage(), actualValue, notNullValue());
+ logger.trace("assert that [{}] matches [{}] (field [{}])", actualValue, expectedValue, getField());
+ if (!actualValue.getClass().equals(safeClass(expectedValue))) {
+ if (actualValue instanceof Number && expectedValue instanceof Number) {
+ //Double 1.0 is equal to Integer 1
+ assertThat(errorMessage(), ((Number) actualValue).doubleValue(), equalTo(((Number) expectedValue).doubleValue()));
+ return;
+ }
+ }
+
+ assertThat(errorMessage(), actualValue, equalTo(expectedValue));
+ }
+
+ private String errorMessage() {
+ return "field [" + getField() + "] doesn't match the expected value";
+ }
+}
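A same-package sketch, not part of the patch; the field names and the uuid value are made up:

    package org.elasticsearch.test.rest.section;

    public class MatchAssertionExample {
        public static void main(String[] args) {
            // plain equality
            new MatchAssertion("get.fields._routing", "5").doAssert("5", "5");

            // values wrapped in / / are treated as regexes; Pattern.COMMENTS is enabled,
            // so whitespace inside the pattern is ignored
            new MatchAssertion("index.uuid", "/^[a-zA-Z0-9_-]+$/")
                    .doAssert("0zsMIm8hQnWg2JJjIC4G1A", "/^[a-zA-Z0-9_-]+$/");

            // numbers of different types compare by double value: Double 1.0 matches Integer 1
            new MatchAssertion("hits.total", 1).doAssert(1.0, 1);
        }
    }
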
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java
new file mode 100644
index 0000000000..d53671bc6b
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Holds a REST test suite loaded from a specific yaml file.
+ * Supports a setup section and multiple test sections.
+ */
+public class RestTestSuite {
+
+ private final String api;
+ private final String name;
+
+ private SetupSection setupSection;
+
+ private Set<TestSection> testSections = new TreeSet<>();
+
+ public RestTestSuite(String api, String name) {
+ this.api = api;
+ this.name = name;
+ }
+
+ public String getApi() {
+ return api;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getPath() {
+ return api + "/" + name;
+ }
+
+ public SetupSection getSetupSection() {
+ return setupSection;
+ }
+
+ public void setSetupSection(SetupSection setupSection) {
+ this.setupSection = setupSection;
+ }
+
+ /**
+ * Adds a {@link org.elasticsearch.test.rest.section.TestSection} to the REST suite
+ * @return true if the test section was not already present, false otherwise
+ */
+ public boolean addTestSection(TestSection testSection) {
+ return this.testSections.add(testSection);
+ }
+
+ public List<TestSection> getTestSections() {
+ return new ArrayList<>(testSections);
+ }
+}
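A sketch, not part of the patch, of the uniqueness guarantee: the sections live in a TreeSet and TestSection compares by name, so a second section with the same name is rejected:

    package org.elasticsearch.test.rest.section;

    public class RestTestSuiteExample {
        public static void main(String[] args) {
            RestTestSuite suite = new RestTestSuite("index", "10_with_id");
            suite.setSetupSection(SetupSection.EMPTY);

            boolean added = suite.addTestSection(new TestSection("Index with ID"));
            boolean duplicate = suite.addTestSection(new TestSection("Index with ID"));

            assert added;
            assert duplicate == false; // same name, silently rejected by the TreeSet
            assert suite.getPath().equals("index/10_with_id");
        }
    }
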
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java
new file mode 100644
index 0000000000..940664b4ee
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.test.rest.RestTestExecutionContext;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Represents a set section:
+ *
+ * - set: {_scroll_id: scroll_id}
+ *
+ */
+public class SetSection implements ExecutableSection {
+
+ private Map<String, String> stash = new HashMap<>();
+
+ public void addSet(String responseField, String stashedField) {
+ stash.put(responseField, stashedField);
+ }
+
+ public Map<String, String> getStash() {
+ return stash;
+ }
+
+ @Override
+ public void execute(RestTestExecutionContext executionContext) throws IOException {
+ for (Map.Entry<String, String> entry : stash.entrySet()) {
+ Object actualValue = executionContext.response(entry.getKey());
+ executionContext.stash().stashValue(entry.getValue(), actualValue);
+ }
+ }
+}
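A sketch, not part of the patch; once executed, the stashed value can be referenced by later sections (e.g. as $scroll_id in the YAML tests):

    package org.elasticsearch.test.rest.section;

    public class SetSectionExample {
        public static void main(String[] args) {
            SetSection set = new SetSection();
            // read the _scroll_id field of the last response and stash it as "scroll_id"
            set.addSet("_scroll_id", "scroll_id");

            assert "scroll_id".equals(set.getStash().get("_scroll_id"));
            // set.execute(executionContext) performs the actual response lookup and stashing
        }
    }
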
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java
new file mode 100644
index 0000000000..45c66fbad4
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Represents a setup section. Holds a skip section and multiple do sections.
+ */
+public class SetupSection {
+
+ public static final SetupSection EMPTY;
+
+ static {
+ EMPTY = new SetupSection();
+ EMPTY.setSkipSection(SkipSection.EMPTY);
+ }
+
+ private SkipSection skipSection;
+
+ private List<DoSection> doSections = new ArrayList<>();
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<DoSection> getDoSections() {
+ return doSections;
+ }
+
+ public void addDoSection(DoSection doSection) {
+ this.doSections.add(doSection);
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+}
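A sketch, not part of the patch. Note that isEmpty() is identity-based, since SetupSection does not override equals: only the shared EMPTY instance reports empty:

    package org.elasticsearch.test.rest.section;

    public class SetupSectionExample {
        public static void main(String[] args) {
            SetupSection setup = new SetupSection();
            setup.setSkipSection(SkipSection.EMPTY);
            setup.addDoSection(new DoSection()); // e.g. an index creation request

            assert SetupSection.EMPTY.isEmpty();
            assert setup.isEmpty() == false;
        }
    }
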
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java
new file mode 100644
index 0000000000..179d0a1e86
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.rest.support.Features;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Represents a skip section that tells whether a specific test section or suite needs to be skipped
+ * based on:
+ * - the elasticsearch version the tests are running against
+ * - a specific test feature required that might not be implemented yet by the runner
+ */
+public class SkipSection {
+
+ public static final SkipSection EMPTY = new SkipSection();
+
+ private final Version lowerVersion;
+ private final Version upperVersion;
+ private final List<String> features;
+ private final String reason;
+
+ private SkipSection() {
+ this.lowerVersion = null;
+ this.upperVersion = null;
+ this.features = new ArrayList<>();
+ this.reason = null;
+ }
+
+ public SkipSection(String versionRange, List<String> features, String reason) {
+ assert features != null;
+ assert (versionRange != null && features.isEmpty()) || (versionRange == null && features.isEmpty() == false);
+ Version[] versions = parseVersionRange(versionRange);
+ this.lowerVersion = versions[0];
+ this.upperVersion = versions[1];
+ this.features = features;
+ this.reason = reason;
+ }
+
+ public Version getLowerVersion() {
+ return lowerVersion;
+ }
+
+ public Version getUpperVersion() {
+ return upperVersion;
+ }
+
+ public List<String> getFeatures() {
+ return features;
+ }
+
+ public String getReason() {
+ return reason;
+ }
+
+ public boolean skip(Version currentVersion) {
+ if (isEmpty()) {
+ return false;
+ }
+ if (isVersionCheck()) {
+ return currentVersion.onOrAfter(lowerVersion) && currentVersion.onOrBefore(upperVersion);
+ } else {
+ return Features.areAllSupported(features) == false;
+ }
+ }
+
+ public boolean isVersionCheck() {
+ return features.isEmpty();
+ }
+
+ public boolean isEmpty() {
+ return EMPTY.equals(this);
+ }
+
+ private Version[] parseVersionRange(String versionRange) {
+ if (versionRange == null) {
+ return new Version[] { null, null };
+ }
+ if (versionRange.trim().equals("all")) {
+ return new Version[]{VersionUtils.getFirstVersion(), Version.CURRENT};
+ }
+ String[] skipVersions = versionRange.split("-");
+ if (skipVersions.length != 2) {
+ throw new IllegalArgumentException("version range malformed: " + versionRange);
+ }
+
+ String lower = skipVersions[0].trim();
+ String upper = skipVersions[1].trim();
+ return new Version[] {
+ lower.isEmpty() ? VersionUtils.getFirstVersion() : Version.fromString(lower),
+ upper.isEmpty() ? Version.CURRENT : Version.fromString(upper)
+ };
+ }
+}
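A sketch, not part of the patch, of the two mutually exclusive modes; the version string follows the "lower - upper" range form parsed above, with either bound omittable:

    package org.elasticsearch.test.rest.section;

    import java.util.Collections;

    import org.elasticsearch.Version;

    public class SkipSectionExample {
        public static void main(String[] args) {
            // version mode: open lower bound, skip everything up to and including 0.90.2
            SkipSection byVersion = new SkipSection(" - 0.90.2",
                    Collections.<String>emptyList(), "bug fixed in 0.90.3");
            assert byVersion.isVersionCheck();
            assert byVersion.skip(Version.CURRENT) == false; // current version is past the range

            // feature mode: no version range, reason optional
            SkipSection byFeature = new SkipSection(null,
                    Collections.singletonList("headers"), null);
            assert byFeature.skip(Version.CURRENT) == false; // "headers" is a supported feature
        }
    }
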
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java
new file mode 100644
index 0000000000..3f44e5ce76
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.section;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Represents a test section, which is composed of a skip section and multiple executable sections.
+ */
+public class TestSection implements Comparable<TestSection> {
+ private final String name;
+ private SkipSection skipSection;
+ private final List<ExecutableSection> executableSections;
+
+ public TestSection(String name) {
+ this.name = name;
+ this.executableSections = new ArrayList<>();
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public SkipSection getSkipSection() {
+ return skipSection;
+ }
+
+ public void setSkipSection(SkipSection skipSection) {
+ this.skipSection = skipSection;
+ }
+
+ public List<ExecutableSection> getExecutableSections() {
+ return executableSections;
+ }
+
+ public void addExecutableSection(ExecutableSection executableSection) {
+ this.executableSections.add(executableSection);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+
+ TestSection that = (TestSection) o;
+
+ if (name != null ? !name.equals(that.name) : that.name != null) return false;
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return name != null ? name.hashCode() : 0;
+ }
+
+ @Override
+ public int compareTo(TestSection o) {
+ return name.compareTo(o.getName());
+ }
+}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java
new file mode 100644
index 0000000000..60c39b6441
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.test.rest.client.RestPath;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Represents an elasticsearch REST endpoint (api)
+ */
+public class RestApi {
+
+ private final String name;
+ private List<String> methods = new ArrayList<>();
+ private List<String> paths = new ArrayList<>();
+ private List<String> pathParts = new ArrayList<>();
+ private List<String> params = new ArrayList<>();
+ private BODY body = BODY.NOT_SUPPORTED;
+
+ public enum BODY {
+ NOT_SUPPORTED, OPTIONAL, REQUIRED
+ }
+
+ RestApi(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public List<String> getMethods() {
+ return methods;
+ }
+
+ /**
+ * Returns the supported http methods given the rest parameters provided
+ */
+ public List<String> getSupportedMethods(Set<String> restParams) {
+ //we try to avoid hardcoded mappings but the index api is the exception
+ if ("index".equals(name) || "create".equals(name)) {
+ List<String> indexMethods = new ArrayList<>();
+ for (String method : methods) {
+ if (restParams.contains("id")) {
+ //PUT when the id is provided
+ if (HttpPut.METHOD_NAME.equals(method)) {
+ indexMethods.add(method);
+ }
+ } else {
+ //POST without id
+ if (HttpPost.METHOD_NAME.equals(method)) {
+ indexMethods.add(method);
+ }
+ }
+ }
+ return indexMethods;
+ }
+
+ return methods;
+ }
+
+ void addMethod(String method) {
+ this.methods.add(method);
+ }
+
+ public List<String> getPaths() {
+ return paths;
+ }
+
+ void addPath(String path) {
+ this.paths.add(path);
+ }
+
+ public List<String> getPathParts() {
+ return pathParts;
+ }
+
+ void addPathPart(String pathPart) {
+ this.pathParts.add(pathPart);
+ }
+
+ public List<String> getParams() {
+ return params;
+ }
+
+ void addParam(String param) {
+ this.params.add(param);
+ }
+
+ void setBodyOptional() {
+ this.body = BODY.OPTIONAL;
+ }
+
+ void setBodyRequired() {
+ this.body = BODY.REQUIRED;
+ }
+
+ public boolean isBodySupported() {
+ return body != BODY.NOT_SUPPORTED;
+ }
+
+ public boolean isBodyRequired() {
+ return body == BODY.REQUIRED;
+ }
+
+ /**
+ * Finds the matching rest paths given the current parameters and replaces
+ * placeholders with their corresponding values received as arguments
+ */
+ public RestPath[] getFinalPaths(Map<String, String> pathParams) {
+ List<RestPath> matchingRestPaths = findMatchingRestPaths(pathParams.keySet());
+ if (matchingRestPaths == null || matchingRestPaths.isEmpty()) {
+ throw new IllegalArgumentException("unable to find matching rest path for api [" + name + "] and path params " + pathParams);
+ }
+
+ RestPath[] restPaths = new RestPath[matchingRestPaths.size()];
+ for (int i = 0; i < matchingRestPaths.size(); i++) {
+ RestPath restPath = matchingRestPaths.get(i);
+ restPaths[i] = restPath.replacePlaceholders(pathParams);
+ }
+ return restPaths;
+ }
+
+ /**
+ * Finds the matching rest paths out of the available ones with the current api (based on REST spec).
+ *
+ * The best path is the one that has exactly the same number of placeholders to replace
+ * (e.g. /{index}/{type}/{id} when the path params are exactly index, type and id).
+ */
+ private List<RestPath> findMatchingRestPaths(Set<String> restParams) {
+
+ List<RestPath> matchingRestPaths = new ArrayList<>();
+ RestPath[] restPaths = buildRestPaths();
+ for (RestPath restPath : restPaths) {
+ if (restPath.matches(restParams)) {
+ matchingRestPaths.add(restPath);
+ }
+ }
+ return matchingRestPaths;
+ }
+
+ private RestPath[] buildRestPaths() {
+ RestPath[] restPaths = new RestPath[paths.size()];
+ for (int i = 0; i < restPaths.length; i++) {
+ restPaths[i] = new RestPath(paths.get(i));
+ }
+ return restPaths;
+ }
+}
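A same-package sketch, not part of the patch (the constructor and mutators are package-private), showing the index/create special case in getSupportedMethods:

    package org.elasticsearch.test.rest.spec;

    import java.util.Collections;
    import java.util.List;

    public class RestApiExample {
        public static void main(String[] args) {
            RestApi index = new RestApi("index");
            index.addMethod("POST");
            index.addMethod("PUT");
            index.addPath("/{index}/{type}");
            index.addPath("/{index}/{type}/{id}");
            index.setBodyRequired();

            // with an id the call must be a PUT...
            List<String> withId = index.getSupportedMethods(Collections.singleton("id"));
            assert withId.equals(Collections.singletonList("PUT"));

            // ...without an id it must be a POST
            List<String> withoutId = index.getSupportedMethods(Collections.<String>emptySet());
            assert withoutId.equals(Collections.singletonList("POST"));
        }
    }
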
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java
new file mode 100644
index 0000000000..0328e4c87d
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * Parser for a REST api spec (single json file)
+ */
+public class RestApiParser {
+
+ public RestApi parse(XContentParser parser) throws IOException {
+
+ try {
+ while (parser.nextToken() != XContentParser.Token.FIELD_NAME) {
+ //move to first field name
+ }
+
+ RestApi restApi = new RestApi(parser.currentName());
+
+ int level = -1;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT || level >= 0) {
+
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ if ("methods".equals(parser.currentName())) {
+ parser.nextToken();
+ while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+ restApi.addMethod(parser.text());
+ }
+ }
+
+ if ("url".equals(parser.currentName())) {
+ String currentFieldName = "url";
+ int innerLevel = -1;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT || innerLevel >= 0) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ currentFieldName = parser.currentName();
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_ARRAY && "paths".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.VALUE_STRING) {
+ restApi.addPath(parser.text());
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT && "parts".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ restApi.addPathPart(parser.currentName());
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IOException("Expected parts field in rest api definition to contain an object");
+ }
+ parser.skipChildren();
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT && "params".equals(currentFieldName)) {
+ while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+ restApi.addParam(parser.currentName());
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IOException("Expected params field in rest api definition to contain an object");
+ }
+ parser.skipChildren();
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ innerLevel++;
+ }
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+ innerLevel--;
+ }
+ }
+ }
+
+ if ("body".equals(parser.currentName())) {
+ parser.nextToken();
+ if (parser.currentToken() != XContentParser.Token.VALUE_NULL) {
+ boolean requiredFound = false;
+ while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+ if ("required".equals(parser.currentName())) {
+ requiredFound = true;
+ parser.nextToken();
+ if (parser.booleanValue()) {
+ restApi.setBodyRequired();
+ } else {
+ restApi.setBodyOptional();
+ }
+ }
+ }
+ }
+ if (!requiredFound) {
+ restApi.setBodyOptional();
+ }
+ }
+ }
+ }
+
+ if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+ level++;
+ }
+ if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+ level--;
+ }
+
+ }
+
+ parser.nextToken();
+ assert parser.currentToken() == XContentParser.Token.END_OBJECT : "Expected [END_OBJECT] but was [" + parser.currentToken() + "]";
+ parser.nextToken();
+
+ return restApi;
+
+ } finally {
+ parser.close();
+ }
+ }
+
+}
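A sketch, not part of the patch, feeding the parser a trimmed-down hypothetical spec; it assumes JsonXContent can create a parser directly from a String, as it does from a stream in RestSpec below:

    package org.elasticsearch.test.rest.spec;

    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;

    public class RestApiParserExample {
        public static void main(String[] args) throws Exception {
            String json = "{ \"ping\": {"
                    + "  \"methods\": [\"HEAD\"],"
                    + "  \"url\": { \"paths\": [\"/\"], \"parts\": {}, \"params\": {} },"
                    + "  \"body\": null"
                    + "} }";
            XContentParser parser = JsonXContent.jsonXContent.createParser(json);
            RestApi ping = new RestApiParser().parse(parser); // parse() closes the parser

            assert "ping".equals(ping.getName());
            assert ping.getMethods().equals(java.util.Collections.singletonList("HEAD"));
            assert ping.isBodySupported() == false; // "body": null means no body
        }
    }
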
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java
new file mode 100644
index 0000000000..2f154728b9
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.spec;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.support.FileUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.FileSystem;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Holds the elasticsearch REST spec
+ */
+public class RestSpec {
+ private final Map<String, RestApi> restApiMap = new HashMap<>();
+
+ private RestSpec() {
+ }
+
+ void addApi(RestApi restApi) {
+ restApiMap.put(restApi.getName(), restApi);
+ }
+
+ public RestApi getApi(String api) {
+ return restApiMap.get(api);
+ }
+
+ public Collection<RestApi> getApis() {
+ return restApiMap.values();
+ }
+
+ /**
+ * Parses the complete set of REST specs available under the provided directories
+ */
+ public static RestSpec parseFrom(FileSystem fileSystem, String optionalPathPrefix, String... paths) throws IOException {
+ RestSpec restSpec = new RestSpec();
+ for (String path : paths) {
+ for (Path jsonFile : FileUtils.findJsonSpec(fileSystem, optionalPathPrefix, path)) {
+ try (InputStream stream = Files.newInputStream(jsonFile)) {
+ XContentParser parser = JsonXContent.jsonXContent.createParser(stream);
+ RestApi restApi = new RestApiParser().parse(parser);
+ restSpec.addApi(restApi);
+ } catch (Throwable ex) {
+ throw new IOException("Can't parse rest spec file: [" + jsonFile + "]", ex);
+ }
+ }
+ }
+ return restSpec;
+ }
+}
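A short usage sketch, assuming the spec files are available on the classpath (so fileSystem is null) and that the prefix and directory name below are illustrative of the bundled rest-api-spec layout:

    RestSpec restSpec = RestSpec.parseFrom(null, "/rest-api-spec", "api");
    RestApi indexApi = restSpec.getApi("index"); // null if no such api was parsed
    for (RestApi api : restSpec.getApis()) {
        // inspect each parsed api definition
    }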
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java
new file mode 100644
index 0000000000..0f51f72e8e
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.rest.support;
+
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Allows registering additional features supported by the test runner.
+ * This way any runner can add extra features and use proper skip sections to avoid
+ * breaking other runners until they have implemented the new feature as well.
+ *
+ * Once all runners have implemented the feature, it can be removed from the list
+ * and the related skip sections can be removed from the tests as well.
+ */
+public final class Features {
+
+ private static final List<String> SUPPORTED = Arrays.asList("stash_in_path", "groovy_scripting", "headers");
+
+ private Features() {
+
+ }
+
+ /**
+ * Tells whether all of the features provided as arguments are supported
+ */
+ public static boolean areAllSupported(List<String> features) {
+ for (String feature : features) {
+ if ("requires_replica".equals(feature) && ESIntegTestCase.cluster().numDataNodes() >= 2) {
+ continue;
+ }
+ if (!SUPPORTED.contains(feature)) {
+ return false;
+ }
+ }
+ return true;
+ }
+}
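A small illustration of the contract, under the assumption that the cluster has fewer than two data nodes (so "requires_replica" is not satisfied):

    // both features are in the SUPPORTED list above
    boolean ok = Features.areAllSupported(Arrays.asList("stash_in_path", "headers"));     // true
    // an unknown feature fails the whole check, so the corresponding test section is skipped
    boolean skip = Features.areAllSupported(Arrays.asList("headers", "not_yet_a_thing")); // false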
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java b/test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java
new file mode 100644
index 0000000000..69acae55fd
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.rest.support;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.PathUtils;
+
+import java.io.IOException;
+import java.net.URL;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileSystem;
+import java.nio.file.FileVisitResult;
+import java.nio.file.Files;
+import java.nio.file.NoSuchFileException;
+import java.nio.file.NotDirectoryException;
+import java.nio.file.Path;
+import java.nio.file.SimpleFileVisitor;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+public final class FileUtils {
+
+ private static final String YAML_SUFFIX = ".yaml";
+ private static final String JSON_SUFFIX = ".json";
+
+ private FileUtils() {
+
+ }
+
+ /**
+ * Returns the JSON files found within the directory provided as argument.
+ * Files are looked up in the classpath, or optionally from {@code fileSystem} if it's not null.
+ */
+ public static Set<Path> findJsonSpec(FileSystem fileSystem, String optionalPathPrefix, String path) throws IOException {
+ Path dir = resolveFile(fileSystem, optionalPathPrefix, path, null);
+
+ if (!Files.isDirectory(dir)) {
+ throw new NotDirectoryException(path);
+ }
+
+ Set<Path> jsonFiles = new HashSet<>();
+ try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
+ for (Path item : stream) {
+ if (item.toString().endsWith(JSON_SUFFIX)) {
+ jsonFiles.add(item);
+ }
+ }
+ }
+
+ if (jsonFiles.isEmpty()) {
+ throw new NoSuchFileException(path, null, "no json files found");
+ }
+
+ return jsonFiles;
+ }
+
+ /**
+ * Returns the YAML files found within the paths provided.
+ * Each input path can either be a single file (the .yaml suffix is optional) or a directory.
+ * Each path is looked up in the classpath, or optionally from {@code fileSystem} if it's not null.
+ */
+ public static Map<String, Set<Path>> findYamlSuites(FileSystem fileSystem, String optionalPathPrefix, final String... paths) throws IOException {
+ Map<String, Set<Path>> yamlSuites = new HashMap<>();
+ for (String path : paths) {
+ collectFiles(resolveFile(fileSystem, optionalPathPrefix, path, YAML_SUFFIX), YAML_SUFFIX, yamlSuites);
+ }
+ return yamlSuites;
+ }
+
+ private static Path resolveFile(FileSystem fileSystem, String optionalPathPrefix, String path, String optionalFileSuffix) throws IOException {
+ if (fileSystem != null) {
+ Path file = findFile(fileSystem, path, optionalFileSuffix);
+ if (!lenientExists(file)) {
+ // try with optional prefix: /rest-api-spec/test (or /rest-api-spec/api) is optional
+ String newPath = optionalPathPrefix + "/" + path;
+ file = findFile(fileSystem, newPath, optionalFileSuffix);
+ if (!lenientExists(file)) {
+ throw new NoSuchFileException("path prefix: " + optionalPathPrefix + ", path: " + path + ", file suffix: " + optionalFileSuffix);
+ }
+ }
+ return file;
+ } else {
+ //try within classpath
+ URL resource = findResource(path, optionalFileSuffix);
+ if (resource == null) {
+ //try within classpath with optional prefix: /rest-api-spec/test (or /rest-api-spec/api) is optional
+ String newPath = optionalPathPrefix + "/" + path;
+ resource = findResource(newPath, optionalFileSuffix);
+ if (resource == null) {
+ throw new NoSuchFileException(path);
+ }
+ }
+ try {
+ return PathUtils.get(resource.toURI());
+ } catch (Exception e) {
+ // some filesystems have REALLY useless exceptions here.
+ // ZipFileSystem I am looking at you.
+ throw new RuntimeException("couldn't retrieve URL: " + resource, e);
+ }
+ }
+ }
+
+ private static URL findResource(String path, String optionalFileSuffix) {
+ URL resource = FileUtils.class.getResource(path);
+ if (resource == null) {
+ //if not found we append the file suffix to the path (as it is optional)
+ if (Strings.hasLength(optionalFileSuffix) && !path.endsWith(optionalFileSuffix)) {
+ resource = FileUtils.class.getResource(path + optionalFileSuffix);
+ }
+ }
+ return resource;
+ }
+
+ // used because this test framework "guesses" paths from several different places on the filesystem!
+ private static boolean lenientExists(Path file) {
+ boolean exists = false;
+ try {
+ exists = Files.exists(file);
+ } catch (SecurityException ok) {}
+ return exists;
+ }
+
+ private static Path findFile(FileSystem fileSystem, String path, String optionalFileSuffix) {
+ Path file = fileSystem.getPath(path);
+ if (!lenientExists(file)) {
+ file = fileSystem.getPath(path + optionalFileSuffix);
+ }
+ return file;
+ }
+
+ private static void collectFiles(final Path dir, final String fileSuffix, final Map<String, Set<Path>> files) throws IOException {
+ Files.walkFileTree(dir, new SimpleFileVisitor<Path>() {
+ @Override
+ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+ if (file.toString().endsWith(fileSuffix)) {
+ String groupName = file.toAbsolutePath().getParent().getFileName().toString();
+ Set<Path> filesSet = files.get(groupName);
+ if (filesSet == null) {
+ filesSet = new HashSet<>();
+ files.put(groupName, filesSet);
+ }
+ filesSet.add(file);
+ }
+ return FileVisitResult.CONTINUE;
+ }
+ });
+ }
+}
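A hedged usage sketch for the YAML lookup, assuming classpath resolution (fileSystem is null) and an illustrative suite path; the returned map groups files by the name of their parent directory, as implemented in collectFiles above:

    Map<String, Set<Path>> suites = FileUtils.findYamlSuites(null, "/rest-api-spec/test", "indices.create");
    for (Map.Entry<String, Set<Path>> group : suites.entrySet()) {
        System.out.println("suite group [" + group.getKey() + "] has " + group.getValue().size() + " files");
    }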
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
new file mode 100644
index 0000000000..27a2e6fb22
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import com.carrotsearch.randomizedtesting.SeedUtils;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import java.nio.charset.StandardCharsets;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.store.*;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestRuleMarkFailure;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.store.FsDirectoryService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.Store;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Assert;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.file.Path;
+import java.util.*;
+
+public class MockFSDirectoryService extends FsDirectoryService {
+
+ public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open";
+ public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write";
+ public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file";
+ public static final String CRASH_INDEX = "index.store.mock.random.crash_index";
+
+ private final FsDirectoryService delegateService;
+ private final Random random;
+ private final double randomIOExceptionRate;
+ private final double randomIOExceptionRateOnOpen;
+ private final MockDirectoryWrapper.Throttling throttle;
+ private final boolean preventDoubleWrite;
+ private final boolean noDeleteOpenFile;
+ private final boolean crashIndex;
+
+ @Inject
+ public MockFSDirectoryService(IndexSettings idxSettings, IndexStore indexStore, final ShardPath path) {
+ super(idxSettings, indexStore, path);
+ Settings indexSettings = idxSettings.getSettings();
+ final long seed = indexSettings.getAsLong(ESIntegTestCase.SETTING_INDEX_SEED, 0L);
+ this.random = new Random(seed);
+
+ randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d);
+ randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d);
+ preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW
+ noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW
+ random.nextInt(shardId.getId() + 1); // some randomness per shard
+ throttle = MockDirectoryWrapper.Throttling.NEVER;
+ crashIndex = indexSettings.getAsBoolean(CRASH_INDEX, true);
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed),
+ throttle, crashIndex);
+ }
+ delegateService = randomDirectoryService(indexStore, path);
+ }
+
+
+ @Override
+ public Directory newDirectory() throws IOException {
+ return wrap(delegateService.newDirectory());
+ }
+
+ @Override
+ protected synchronized Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ public static void checkIndex(ESLogger logger, Store store, ShardId shardId) {
+ if (store.tryIncRef()) {
+ logger.info("start check index");
+ try {
+ Directory dir = store.directory();
+ if (!Lucene.indexExists(dir)) {
+ return;
+ }
+ if (IndexWriter.isLocked(dir)) {
+ ESTestCase.checkIndexFailed = true;
+ throw new IllegalStateException("IndexWriter is still open on shard " + shardId);
+ }
+ try (CheckIndex checkIndex = new CheckIndex(dir)) {
+ BytesStreamOutput os = new BytesStreamOutput();
+ PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
+ checkIndex.setInfoStream(out);
+ out.flush();
+ CheckIndex.Status status = checkIndex.checkIndex();
+ if (!status.clean) {
+ ESTestCase.checkIndexFailed = true;
+ logger.warn("check index [failure] index files={}\n{}",
+ Arrays.toString(dir.listAll()),
+ new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
+ throw new IOException("index check failure");
+ } else {
+ if (logger.isDebugEnabled()) {
+ logger.debug("check index [success]\n{}", new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
+ }
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("failed to check index", e);
+ } finally {
+ logger.info("end check index");
+ store.decRef();
+ }
+ }
+ }
+
+ @Override
+ public void onPause(long nanos) {
+ delegateService.onPause(nanos);
+ }
+
+ @Override
+ public StoreRateLimiting rateLimiting() {
+ return delegateService.rateLimiting();
+ }
+
+ @Override
+ public long throttleTimeInNanos() {
+ return delegateService.throttleTimeInNanos();
+ }
+
+ public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate";
+
+ private Directory wrap(Directory dir) {
+ final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex);
+ w.setRandomIOExceptionRate(randomIOExceptionRate);
+ w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen);
+ w.setThrottling(throttle);
+ w.setCheckIndexOnClose(false); // we do this on the index level
+ w.setPreventDoubleWrite(preventDoubleWrite);
+ // TODO: make this test robust to virus scanner
+ w.setEnableVirusScanner(false);
+ w.setNoDeleteOpenFile(noDeleteOpenFile);
+ w.setUseSlowOpenClosers(false);
+ LuceneTestCase.closeAfterSuite(new CloseableDirectory(w));
+ return w;
+ }
+
+ private FsDirectoryService randomDirectoryService(IndexStore indexStore, ShardPath path) {
+ final IndexSettings indexSettings = indexStore.getIndexSettings();
+ final IndexMetaData build = IndexMetaData.builder(indexSettings.getIndexMetaData())
+ .settings(Settings.builder()
+ .put(indexSettings.getSettings())
+ .put(IndexModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey()))
+ .build();
+ final IndexSettings newIndexSettings = new IndexSettings(build, indexSettings.getNodeSettings(), Collections.emptyList());
+ return new FsDirectoryService(newIndexSettings, indexStore, path);
+ }
+
+ public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper {
+
+ private final boolean crash;
+
+ public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, boolean crash) {
+ super(random, delegate);
+ this.crash = crash;
+ }
+
+ @Override
+ public synchronized void crash() throws IOException {
+ if (crash) {
+ super.crash();
+ }
+ }
+ }
+
+ final class CloseableDirectory implements Closeable {
+ private final BaseDirectoryWrapper dir;
+ private final TestRuleMarkFailure failureMarker;
+
+ public CloseableDirectory(BaseDirectoryWrapper dir) {
+ this.dir = dir;
+ this.failureMarker = ESTestCase.getSuiteFailureMarker();
+ }
+
+ @Override
+ public void close() {
+ // We only attempt to check open/closed state if there were no other test
+ // failures.
+ try {
+ if (failureMarker.wasSuccessful() && dir.isOpen()) {
+ Assert.fail("Directory not closed: " + dir);
+ }
+ } finally {
+ // TODO: perform real close of the delegate: LUCENE-4058
+ // dir.close();
+ }
+ }
+ }
+}
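Since the constructor above reads its randomization knobs from the index settings, a test can tune the failure injection per index. A sketch with arbitrary rates; any index created with these settings and the mock store type picks them up:

    Settings indexSettings = Settings.builder()
            .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, 0.1d)          // ~10% of IO ops throw
            .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.05d) // ~5% of opens throw
            .put(MockFSDirectoryService.CRASH_INDEX, false)                      // disable simulated crashes
            .build();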
diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
new file mode 100644
index 0000000000..86cf0ddb56
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.store;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.shard.*;
+import org.elasticsearch.index.store.DirectoryService;
+import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.index.store.IndexStoreConfig;
+import org.elasticsearch.plugins.Plugin;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.IdentityHashMap;
+import java.util.Map;
+
+public class MockFSIndexStore extends IndexStore {
+
+ public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close";
+
+ public static class TestPlugin extends Plugin {
+ @Override
+ public String name() {
+ return "mock-index-store";
+ }
+ @Override
+ public String description() {
+ return "a mock index store for testing";
+ }
+ @Override
+ public Settings additionalSettings() {
+ return Settings.builder().put(IndexModule.STORE_TYPE, "mock").build();
+ }
+
+ @Override
+ public void onIndexModule(IndexModule indexModule) {
+ Settings indexSettings = indexModule.getSettings();
+ if ("mock".equals(indexSettings.get(IndexModule.STORE_TYPE))) {
+ if (indexSettings.getAsBoolean(CHECK_INDEX_ON_CLOSE, true)) {
+ indexModule.addIndexEventListener(new Listener());
+ }
+ indexModule.addIndexStore("mock", MockFSIndexStore::new);
+ }
+ }
+ }
+
+ MockFSIndexStore(IndexSettings indexSettings,
+ IndexStoreConfig config) {
+ super(indexSettings, config);
+ }
+
+ public DirectoryService newDirectoryService(ShardPath path) {
+ return new MockFSDirectoryService(indexSettings, this, path);
+ }
+
+ private static final EnumSet<IndexShardState> validCheckIndexStates = EnumSet.of(
+ IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY
+ );
+ private static final class Listener implements IndexEventListener {
+
+ private final Map<IndexShard, Boolean> shardSet = Collections.synchronizedMap(new IdentityHashMap<>());
+ @Override
+ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) {
+ if (indexShard != null) {
+ Boolean remove = shardSet.remove(indexShard);
+ if (remove == Boolean.TRUE) {
+ ESLogger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId());
+ MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId());
+ }
+ }
+ }
+
+ @Override
+ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
+ if (currentState == IndexShardState.CLOSED && validCheckIndexStates.contains(previousState) && indexShard.indexSettings().isOnSharedFilesystem() == false) {
+ shardSet.put(indexShard, Boolean.TRUE);
+ }
+
+ }
+ }
+
+}
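To activate the mock store in an integration test, the TestPlugin above is registered as a node plugin; a minimal sketch, assuming the usual ESIntegTestCase nodePlugins() hook:

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        // forces index.store.type=mock via additionalSettings(), and checks the
        // index on close unless index.store.mock.check_index_on_close is false
        return Collections.singleton(MockFSIndexStore.TestPlugin.class);
    }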
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
new file mode 100644
index 0000000000..8b39500357
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
+import org.elasticsearch.transport.local.LocalTransport;
+
+import java.io.IOException;
+import java.util.Random;
+
+public class AssertingLocalTransport extends LocalTransport {
+
+ public static class TestPlugin extends Plugin {
+ @Override
+ public String name() {
+ return "asserting-local-transport";
+ }
+ @Override
+ public String description() {
+ return "an asserting transport for testing";
+ }
+ public void onModule(NetworkModule module) {
+ module.registerTransport("mock", AssertingLocalTransport.class);
+ }
+ @Override
+ public Settings additionalSettings() {
+ return Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "mock").build();
+ }
+ }
+
+ public static final String ASSERTING_TRANSPORT_MIN_VERSION_KEY = "transport.asserting.version.min";
+ public static final String ASSERTING_TRANSPORT_MAX_VERSION_KEY = "transport.asserting.version.max";
+ private final Random random;
+ private final Version minVersion;
+ private final Version maxVersion;
+
+ @Inject
+ public AssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) {
+ super(settings, threadPool, version, namedWriteableRegistry);
+ final long seed = settings.getAsLong(ESIntegTestCase.SETTING_INDEX_SEED, 0L);
+ random = new Random(seed);
+ minVersion = settings.getAsVersion(ASSERTING_TRANSPORT_MIN_VERSION_KEY, Version.V_0_18_0);
+ maxVersion = settings.getAsVersion(ASSERTING_TRANSPORT_MAX_VERSION_KEY, Version.CURRENT);
+ }
+
+ @Override
+ protected void handleParsedResponse(final TransportResponse response, final TransportResponseHandler handler) {
+ ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersionBetween(random, minVersion, maxVersion), response,
+ namedWriteableRegistry);
+ super.handleParsedResponse(response, handler);
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ ElasticsearchAssertions.assertVersionSerializable(VersionUtils.randomVersionBetween(random, minVersion, maxVersion), request,
+ namedWriteableRegistry);
+ super.sendRequest(node, requestId, action, request, options);
+ }
+}
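The min/max version keys above bound the random version used for the serialization assertion. A sketch of node settings that narrow that range; the concrete version strings are illustrative:

    Settings settings = Settings.builder()
            .put(NetworkModule.TRANSPORT_TYPE_KEY, "mock") // what TestPlugin.additionalSettings() does
            .put(AssertingLocalTransport.ASSERTING_TRANSPORT_MIN_VERSION_KEY, "2.0.0")
            .put(AssertingLocalTransport.ASSERTING_TRANSPORT_MAX_VERSION_KEY, Version.CURRENT.toString())
            .build();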
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
new file mode 100644
index 0000000000..2363d98a11
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.BlockingQueue;
+
+/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */
+public class CapturingTransport implements Transport {
+ private TransportServiceAdapter adapter;
+
+ public static class CapturedRequest {
+ public final DiscoveryNode node;
+ public final long requestId;
+ public final String action;
+ public final TransportRequest request;
+
+ public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) {
+ this.node = node;
+ this.requestId = requestId;
+ this.action = action;
+ this.request = request;
+ }
+ }
+
+ private BlockingQueue<CapturedRequest> capturedRequests = ConcurrentCollections.newBlockingQueue();
+
+ /** returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */
+ public CapturedRequest[] capturedRequests() {
+ return capturedRequests.toArray(new CapturedRequest[0]);
+ }
+
+ /**
+ * returns all requests captured so far, grouped by target node.
+ * Doesn't clear the captured request list. See {@link #clear()}
+ */
+ public Map<String, List<CapturedRequest>> capturedRequestsByTargetNode() {
+ Map<String, List<CapturedRequest>> map = new HashMap<>();
+ for (CapturedRequest request : capturedRequests) {
+ List<CapturedRequest> nodeList = map.get(request.node.id());
+ if (nodeList == null) {
+ nodeList = new ArrayList<>();
+ map.put(request.node.id(), nodeList);
+ }
+ nodeList.add(request);
+ }
+ return map;
+ }
+
+ /** clears captured requests */
+ public void clear() {
+ capturedRequests.clear();
+ }
+
+ /** simulate a response for the given requestId */
+ public void handleResponse(final long requestId, final TransportResponse response) {
+ adapter.onResponseReceived(requestId).handleResponse(response);
+ }
+
+ /** simulate a remote error for the given requestId */
+ public void handleResponse(final long requestId, final Throwable t) {
+ adapter.onResponseReceived(requestId).handleException(new RemoteTransportException("remote failure", t));
+ }
+
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ capturedRequests.add(new CapturedRequest(node, requestId, action, request));
+ }
+
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter adapter) {
+ this.adapter = adapter;
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return null;
+ }
+
+ @Override
+ public Map<String, BoundTransportAddress> profileBoundAddresses() {
+ return null;
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
+ // this capturing transport never resolves addresses; return an empty array
+ return new TransportAddress[0];
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return false;
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return true;
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+
+ }
+
+ @Override
+ public long serverOpen() {
+ return 0;
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return null;
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+
+ }
+
+ @Override
+ public Transport start() {
+ return null;
+ }
+
+ @Override
+ public Transport stop() {
+ return null;
+ }
+
+ @Override
+ public void close() {
+
+ }
+
+ @Override
+ public List<String> getLocalAddresses() {
+ return Collections.emptyList();
+ }
+}
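A typical test interaction, sketched: trigger an action over the capturing transport, inspect what was "sent", and answer it by hand. The action name is illustrative:

    CapturingTransport transport = new CapturingTransport();
    // ... wire the transport into a TransportService and invoke the code under test ...
    for (CapturingTransport.CapturedRequest captured : transport.capturedRequests()) {
        if ("internal:test/action".equals(captured.action)) {
            // complete the request with an empty response (or pass a Throwable for the error path)
            transport.handleResponse(captured.requestId, TransportResponse.Empty.INSTANCE);
        }
    }
    transport.clear();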
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
new file mode 100644
index 0000000000..d636341e42
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
@@ -0,0 +1,618 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.RequestHandlerRegistry;
+import org.elasticsearch.transport.Transport;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportServiceAdapter;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArrayList;
+
+/**
+ * A mock transport service that allows simulating different network topology failures.
+ * Internally it maps TransportAddress objects to rules that inject failures.
+ * Adding rules for a node is done by adding rules for all bound addresses of a node
+ * (and the publish address, if different).
+ * Matching requests to rules is based on the transport address associated with the
+ * discovery node of the request, namely by DiscoveryNode.getAddress().
+ * This address is usually the publish address of the node but can also be a different one
+ * (for example, see org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing, which constructs
+ * fake DiscoveryNode instances where the publish address is one of the bound addresses).
+ */
+public class MockTransportService extends TransportService {
+
+ public static class TestPlugin extends Plugin {
+ @Override
+ public String name() {
+ return "mock-transport-service";
+ }
+ @Override
+ public String description() {
+ return "a mock transport service for testing";
+ }
+ public void onModule(NetworkModule module) {
+ module.registerTransportService("mock", MockTransportService.class);
+ }
+ @Override
+ public Settings additionalSettings() {
+ return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "mock").build();
+ }
+ }
+
+ private final Transport original;
+
+ @Inject
+ public MockTransportService(Settings settings, Transport transport, ThreadPool threadPool) {
+ super(settings, new LookupTestTransport(transport), threadPool);
+ this.original = transport;
+ }
+
+ public static TransportAddress[] extractTransportAddresses(TransportService transportService) {
+ HashSet<TransportAddress> transportAddresses = new HashSet<>();
+ BoundTransportAddress boundTransportAddress = transportService.boundAddress();
+ transportAddresses.addAll(Arrays.asList(boundTransportAddress.boundAddresses()));
+ transportAddresses.add(boundTransportAddress.publishAddress());
+ return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]);
+ }
+
+ /**
+ * Clears all the registered rules.
+ */
+ public void clearAllRules() {
+ transport().transports.clear();
+ }
+
+ /**
+ * Clears the rule associated with the provided transport service.
+ */
+ public void clearRule(TransportService transportService) {
+ for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+ clearRule(transportAddress);
+ }
+ }
+
+ /**
+ * Clears the rule associated with the provided transport address.
+ */
+ public void clearRule(TransportAddress transportAddress) {
+ transport().transports.remove(transportAddress);
+ }
+
+ /**
+ * Returns the original Transport service wrapped by this mock transport service.
+ */
+ public Transport original() {
+ return original;
+ }
+
+ /**
+ * Adds a rule that will cause every send request to fail, and each new connection attempted
+ * after the rule is added to fail as well.
+ */
+ public void addFailToSendNoConnectRule(TransportService transportService) {
+ for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+ addFailToSendNoConnectRule(transportAddress);
+ }
+ }
+
+ /**
+ * Adds a rule that will cause every send request to fail, and each new connection attempted
+ * after the rule is added to fail as well.
+ */
+ public void addFailToSendNoConnectRule(TransportAddress transportAddress) {
+ addDelegate(transportAddress, new DelegateTransport(original) {
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "DISCONNECT: simulated");
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "DISCONNECT: simulated");
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ throw new ConnectTransportException(node, "DISCONNECT: simulated");
+ }
+ });
+ }
+
+ /**
+ * Adds a rule that will cause matching operations to throw ConnectTransportExceptions
+ */
+ public void addFailToSendNoConnectRule(TransportService transportService, final String... blockedActions) {
+ addFailToSendNoConnectRule(transportService, new HashSet<>(Arrays.asList(blockedActions)));
+ }
+
+ /**
+ * Adds a rule that will cause matching operations to throw ConnectTransportExceptions
+ */
+ public void addFailToSendNoConnectRule(TransportAddress transportAddress, final String... blockedActions) {
+ addFailToSendNoConnectRule(transportAddress, new HashSet<>(Arrays.asList(blockedActions)));
+ }
+
+ /**
+ * Adds a rule that will cause matching operations to throw ConnectTransportExceptions
+ */
+ public void addFailToSendNoConnectRule(TransportService transportService, final Set<String> blockedActions) {
+ for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+ addFailToSendNoConnectRule(transportAddress, blockedActions);
+ }
+ }
+
+ /**
+ * Adds a rule that will cause matching operations to throw ConnectTransportExceptions
+ */
+ public void addFailToSendNoConnectRule(TransportAddress transportAddress, final Set<String> blockedActions) {
+
+ addDelegate(transportAddress, new DelegateTransport(original) {
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ original.connectToNode(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ original.connectToNodeLight(node);
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ if (blockedActions.contains(action)) {
+ logger.info("--> preventing {} request", action);
+ throw new ConnectTransportException(node, "DISCONNECT: prevented " + action + " request");
+ }
+ original.sendRequest(node, requestId, action, request, options);
+ }
+ });
+ }
+
+ /**
+ * Adds a rule that ignores each send request, simulating an unresponsive node,
+ * and fails each new connection attempt once the rule has been added.
+ */
+ public void addUnresponsiveRule(TransportService transportService) {
+ for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+ addUnresponsiveRule(transportAddress);
+ }
+ }
+
+ /**
+ * Adds a rule that ignores each send request, simulating an unresponsive node,
+ * and fails each new connection attempt once the rule has been added.
+ */
+ public void addUnresponsiveRule(TransportAddress transportAddress) {
+ addDelegate(transportAddress, new DelegateTransport(original) {
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ // don't send anything, the receiving node is unresponsive
+ }
+ });
+ }
+
+ /**
+ * Adds a rule that delays each send request and connection attempt, simulating an unresponsive
+ * node; connection attempts fail if the delay outlasts the connect timeout.
+ *
+ * @param duration the amount of time to delay sending and connecting.
+ */
+ public void addUnresponsiveRule(TransportService transportService, final TimeValue duration) {
+ for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+ addUnresponsiveRule(transportAddress, duration);
+ }
+ }
+
+ /**
+ * Adds a rule that delays each send request and connection attempt, simulating an unresponsive
+ * node; connection attempts fail if the delay outlasts the connect timeout.
+ *
+ * @param duration the amount of time to delay sending and connecting.
+ */
+ public void addUnresponsiveRule(TransportAddress transportAddress, final TimeValue duration) {
+ final long startTime = System.currentTimeMillis();
+
+ addDelegate(transportAddress, new DelegateTransport(original) {
+
+ TimeValue getDelay() {
+ return new TimeValue(duration.millis() - (System.currentTimeMillis() - startTime));
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ TimeValue delay = getDelay();
+ if (delay.millis() <= 0) {
+ original.connectToNode(node);
+ return;
+ }
+
+ // TODO: Replace with proper setting
+ TimeValue connectingTimeout = NetworkService.TcpSettings.TCP_DEFAULT_CONNECT_TIMEOUT;
+ try {
+ if (delay.millis() < connectingTimeout.millis()) {
+ Thread.sleep(delay.millis());
+ original.connectToNode(node);
+ } else {
+ Thread.sleep(connectingTimeout.millis());
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+ } catch (InterruptedException e) {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: interrupted while sleeping", e);
+ }
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ TimeValue delay = getDelay();
+ if (delay.millis() <= 0) {
+ original.connectToNodeLight(node);
+ return;
+ }
+
+ // TODO: Replace with proper setting
+ TimeValue connectingTimeout = NetworkService.TcpSettings.TCP_DEFAULT_CONNECT_TIMEOUT;
+ try {
+ if (delay.millis() < connectingTimeout.millis()) {
+ Thread.sleep(delay.millis());
+ original.connectToNodeLight(node);
+ } else {
+ Thread.sleep(connectingTimeout.millis());
+ throw new ConnectTransportException(node, "UNRESPONSIVE: simulated");
+ }
+ } catch (InterruptedException e) {
+ throw new ConnectTransportException(node, "UNRESPONSIVE: interrupted while sleeping", e);
+ }
+ }
+
+ @Override
+ public void sendRequest(final DiscoveryNode node, final long requestId, final String action, TransportRequest request, final TransportRequestOptions options) throws IOException, TransportException {
+ // delayed sending - even if longer than the request timeout, to simulate a potential late response from the target node
+
+ TimeValue delay = getDelay();
+ if (delay.millis() <= 0) {
+ original.sendRequest(node, requestId, action, request, options);
+ return;
+ }
+
+ // poor man's request cloning...
+ RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action);
+ BytesStreamOutput bStream = new BytesStreamOutput();
+ request.writeTo(bStream);
+ final TransportRequest clonedRequest = reg.newRequest();
+ clonedRequest.readFrom(StreamInput.wrap(bStream.bytes()));
+
+ threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() {
+ @Override
+ public void onFailure(Throwable e) {
+ logger.debug("failed to send delayed request", e);
+ }
+
+ @Override
+ protected void doRun() throws IOException {
+ original.sendRequest(node, requestId, action, clonedRequest, options);
+ }
+ });
+ }
+ });
+ }
+
+ /**
+ * Adds a new delegate transport that is used for communication with the given transport service.
+ *
+ * @return <tt>true</tt> iff no other delegate was registered for any of the addresses bound by transport service, otherwise <tt>false</tt>
+ */
+ public boolean addDelegate(TransportService transportService, DelegateTransport transport) {
+ boolean noneRegistered = true;
+ for (TransportAddress transportAddress : extractTransportAddresses(transportService)) {
+ noneRegistered &= addDelegate(transportAddress, transport);
+ }
+ return noneRegistered;
+ }
+
+ /**
+ * Adds a new delegate transport that is used for communication with the given transport address.
+ *
+ * @return <tt>true</tt> iff no other delegate was registered for this address before, otherwise <tt>false</tt>
+ */
+ public boolean addDelegate(TransportAddress transportAddress, DelegateTransport transport) {
+ return transport().transports.put(transportAddress, transport) == null;
+ }
+
+ private LookupTestTransport transport() {
+ return (LookupTestTransport) transport;
+ }
+
+ /**
+ * A lookup transport that has a list of potential Transport implementations to delegate to for node operations;
+ * if none is registered, the default one is used.
+ */
+ private static class LookupTestTransport extends DelegateTransport {
+
+ final ConcurrentMap<TransportAddress, Transport> transports = ConcurrentCollections.newConcurrentMap();
+
+ LookupTestTransport(Transport transport) {
+ super(transport);
+ }
+
+ private Transport getTransport(DiscoveryNode node) {
+ Transport transport = transports.get(node.getAddress());
+ if (transport != null) {
+ return transport;
+ }
+ return this.transport;
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return getTransport(node).nodeConnected(node);
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ getTransport(node).connectToNode(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ getTransport(node).connectToNodeLight(node);
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+ getTransport(node).disconnectFromNode(node);
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ getTransport(node).sendRequest(node, requestId, action, request, options);
+ }
+ }
+
+ /**
+ * A pure delegate transport.
+ * Can be extracted to a common class if needed in other places in the codebase.
+ */
+ public static class DelegateTransport implements Transport {
+
+ protected final Transport transport;
+
+
+ public DelegateTransport(Transport transport) {
+ this.transport = transport;
+ }
+
+ @Override
+ public void transportServiceAdapter(TransportServiceAdapter service) {
+ transport.transportServiceAdapter(service);
+ }
+
+ @Override
+ public BoundTransportAddress boundAddress() {
+ return transport.boundAddress();
+ }
+
+ @Override
+ public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
+ return transport.addressesFromString(address, perAddressLimit);
+ }
+
+ @Override
+ public boolean addressSupported(Class<? extends TransportAddress> address) {
+ return transport.addressSupported(address);
+ }
+
+ @Override
+ public boolean nodeConnected(DiscoveryNode node) {
+ return transport.nodeConnected(node);
+ }
+
+ @Override
+ public void connectToNode(DiscoveryNode node) throws ConnectTransportException {
+ transport.connectToNode(node);
+ }
+
+ @Override
+ public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException {
+ transport.connectToNodeLight(node);
+ }
+
+ @Override
+ public void disconnectFromNode(DiscoveryNode node) {
+ transport.disconnectFromNode(node);
+ }
+
+ @Override
+ public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+ transport.sendRequest(node, requestId, action, request, options);
+ }
+
+ @Override
+ public long serverOpen() {
+ return transport.serverOpen();
+ }
+
+ @Override
+ public List<String> getLocalAddresses() {
+ return transport.getLocalAddresses();
+ }
+
+ @Override
+ public Lifecycle.State lifecycleState() {
+ return transport.lifecycleState();
+ }
+
+ @Override
+ public void addLifecycleListener(LifecycleListener listener) {
+ transport.addLifecycleListener(listener);
+ }
+
+ @Override
+ public void removeLifecycleListener(LifecycleListener listener) {
+ transport.removeLifecycleListener(listener);
+ }
+
+ @Override
+ public Transport start() {
+ transport.start();
+ return this;
+ }
+
+ @Override
+ public Transport stop() {
+ transport.stop();
+ return this;
+ }
+
+ @Override
+ public void close() {
+ transport.close();
+ }
+
+ @Override
+ public Map<String, BoundTransportAddress> profileBoundAddresses() {
+ return transport.profileBoundAddresses();
+ }
+ }
+
+
+ List<Tracer> activeTracers = new CopyOnWriteArrayList<>();
+
+ public static class Tracer {
+ public void receivedRequest(long requestId, String action) {
+ }
+
+ public void responseSent(long requestId, String action) {
+ }
+
+ public void responseSent(long requestId, String action, Throwable t) {
+ }
+
+ public void receivedResponse(long requestId, DiscoveryNode sourceNode, String action) {
+ }
+
+ public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
+ }
+ }
+
+ public void addTracer(Tracer tracer) {
+ activeTracers.add(tracer);
+ }
+
+ public boolean removeTracer(Tracer tracer) {
+ return activeTracers.remove(tracer);
+ }
+
+ public void clearTracers() {
+ activeTracers.clear();
+ }
+
+ @Override
+ protected Adapter createAdapter() {
+ return new MockAdapter();
+ }
+
+ class MockAdapter extends Adapter {
+
+ @Override
+ protected boolean traceEnabled() {
+ return super.traceEnabled() || activeTracers.isEmpty() == false;
+ }
+
+ @Override
+ protected void traceReceivedRequest(long requestId, String action) {
+ super.traceReceivedRequest(requestId, action);
+ for (Tracer tracer : activeTracers) {
+ tracer.receivedRequest(requestId, action);
+ }
+ }
+
+ @Override
+ protected void traceResponseSent(long requestId, String action) {
+ super.traceResponseSent(requestId, action);
+ for (Tracer tracer : activeTracers) {
+ tracer.responseSent(requestId, action);
+ }
+ }
+
+ @Override
+ protected void traceResponseSent(long requestId, String action, Throwable t) {
+ super.traceResponseSent(requestId, action, t);
+ for (Tracer tracer : activeTracers) {
+ tracer.responseSent(requestId, action, t);
+ }
+ }
+
+ @Override
+ protected void traceReceivedResponse(long requestId, DiscoveryNode sourceNode, String action) {
+ super.traceReceivedResponse(requestId, sourceNode, action);
+ for (Tracer tracer : activeTracers) {
+ tracer.receivedResponse(requestId, sourceNode, action);
+ }
+ }
+
+ @Override
+ protected void traceRequestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
+ super.traceRequestSent(node, requestId, action, options);
+ for (Tracer tracer : activeTracers) {
+ tracer.requestSent(node, requestId, action, options);
+ }
+ }
+ }
+
+
+}
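Putting the rule API together, a sketch of simulating and healing a one-way network partition between two services; the names are placeholders for services obtained from the test cluster:

    static void runUnderPartition(MockTransportService serviceA, TransportService serviceB) {
        // every send/connect from A to any of B's addresses now throws ConnectTransportException
        serviceA.addFailToSendNoConnectRule(serviceB);
        // ... exercise the code under test while A cannot reach B ...
        serviceA.clearRule(serviceB); // heal the simulated partition
    }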
diff --git a/test/framework/src/main/resources/log4j.properties b/test/framework/src/main/resources/log4j.properties
new file mode 100644
index 0000000000..22f54ef68e
--- /dev/null
+++ b/test/framework/src/main/resources/log4j.properties
@@ -0,0 +1,9 @@
+es.logger.level=INFO
+log4j.rootLogger=${es.logger.level}, out
+
+log4j.logger.org.apache.http=INFO, out
+log4j.additivity.org.apache.http=false
+
+log4j.appender.out=org.apache.log4j.ConsoleAppender
+log4j.appender.out.layout=org.apache.log4j.PatternLayout
+log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
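Because the root level is derived from the es.logger.level property (defaulting to INFO above), a test run can be made more verbose without editing this file; assuming the build forwards JVM system properties, something like:

    -Des.logger.level=DEBUG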