summary | refs | log | tree | commit | diff
path: root/core/src
diff options
context:
space:
mode:
author Igor Motov <igor@motovs.org> 2016-01-27 19:08:26 -0500
committer Igor Motov <igor@motovs.org> 2016-01-27 19:08:26 -0500
commit 8c5171fac85b940948cc543c3a4686ef1f173cd3 (patch)
tree 0e173c324a5c22e84c42f332c25863d473b19953 /core/src
parent 7ef762c8f0758d08fb136a4fbecf873f0fdb95ec (diff)
Tests: add common blob store tests for reuse in plugins
Adds unit tests for blob operations and integration tests for repository operations. These tests can be used by repository plugins to verify that repository operations were implemented as expected by BlobStoreRepository.
Diffstat (limited to 'core/src')
-rw-r--r-- core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java                      |   4
-rw-r--r-- core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTests.java                 | 139
-rw-r--r-- core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java      |  38
-rw-r--r-- core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java               |  38
-rw-r--r-- core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java               |  37
-rw-r--r-- core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java            |   2
-rw-r--r-- core/src/test/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java               | 115
-rw-r--r-- core/src/test/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java         | 197
-rw-r--r-- core/src/test/java/org/elasticsearch/test/ESBlobStoreTestCase.java                        |  82
9 files changed, 510 insertions, 142 deletions
diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java
index df3a8c6be8..9275b37915 100644
--- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java
+++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java
@@ -18,16 +18,16 @@
*/
package org.elasticsearch.common.blobstore;
+import java.io.Closeable;
import java.io.IOException;
/**
*
*/
-public interface BlobStore {
+public interface BlobStore extends Closeable {
BlobContainer blobContainer(BlobPath path);
void delete(BlobPath path) throws IOException;
- void close();
}
diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTests.java
deleted file mode 100644
index 80afa5d51f..0000000000
--- a/core/src/test/java/org/elasticsearch/common/blobstore/BlobStoreTests.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.blobstore;
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.common.blobstore.fs.FsBlobStore;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.test.ESTestCase;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.hamcrest.CoreMatchers.notNullValue;
-
-@LuceneTestCase.SuppressFileSystems("ExtrasFS")
-public class BlobStoreTests extends ESTestCase {
- public void testWriteRead() throws IOException {
- final BlobStore store = newBlobStore();
- final BlobContainer container = store.blobContainer(new BlobPath());
- byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
- container.writeBlob("foobar", new BytesArray(data));
- try (InputStream stream = container.readBlob("foobar")) {
- BytesRefBuilder target = new BytesRefBuilder();
- while (target.length() < data.length) {
- byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())];
- int offset = scaledRandomIntBetween(0, buffer.length - 1);
- int read = stream.read(buffer, offset, buffer.length - offset);
- target.append(new BytesRef(buffer, offset, read));
- }
- assertEquals(data.length, target.length());
- assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length()));
- }
- store.close();
- }
-
- public void testMoveAndList() throws IOException {
- final BlobStore store = newBlobStore();
- final BlobContainer container = store.blobContainer(new BlobPath());
- assertThat(container.listBlobs().size(), equalTo(0));
- int numberOfFooBlobs = randomIntBetween(0, 10);
- int numberOfBarBlobs = randomIntBetween(3, 20);
- Map<String, Long> generatedBlobs = new HashMap<>();
- for (int i = 0; i < numberOfFooBlobs; i++) {
- int length = randomIntBetween(10, 100);
- String name = "foo-" + i + "-";
- generatedBlobs.put(name, (long) length);
- createRandomBlob(container, name, length);
- }
- for (int i = 1; i < numberOfBarBlobs; i++) {
- int length = randomIntBetween(10, 100);
- String name = "bar-" + i + "-";
- generatedBlobs.put(name, (long) length);
- createRandomBlob(container, name, length);
- }
- int length = randomIntBetween(10, 100);
- String name = "bar-0-";
- generatedBlobs.put(name, (long) length);
- byte[] data = createRandomBlob(container, name, length);
-
- Map<String, BlobMetaData> blobs = container.listBlobs();
- assertThat(blobs.size(), equalTo(numberOfFooBlobs + numberOfBarBlobs));
- for (Map.Entry<String, Long> generated : generatedBlobs.entrySet()) {
- BlobMetaData blobMetaData = blobs.get(generated.getKey());
- assertThat(generated.getKey(), blobMetaData, notNullValue());
- assertThat(blobMetaData.name(), equalTo(generated.getKey()));
- assertThat(blobMetaData.length(), equalTo(generated.getValue()));
- }
-
- assertThat(container.listBlobsByPrefix("foo-").size(), equalTo(numberOfFooBlobs));
- assertThat(container.listBlobsByPrefix("bar-").size(), equalTo(numberOfBarBlobs));
- assertThat(container.listBlobsByPrefix("baz-").size(), equalTo(0));
-
- String newName = "bar-new";
- // Move to a new location
- container.move(name, newName);
- assertThat(container.listBlobsByPrefix(name).size(), equalTo(0));
- blobs = container.listBlobsByPrefix(newName);
- assertThat(blobs.size(), equalTo(1));
- assertThat(blobs.get(newName).length(), equalTo(generatedBlobs.get(name)));
- assertThat(data, equalTo(readBlobFully(container, newName, length)));
- store.close();
- }
-
- protected byte[] createRandomBlob(BlobContainer container, String name, int length) throws IOException {
- byte[] data = randomBytes(length);
- container.writeBlob(name, new BytesArray(data));
- return data;
- }
-
- protected byte[] readBlobFully(BlobContainer container, String name, int length) throws IOException {
- byte[] data = new byte[length];
- try (InputStream inputStream = container.readBlob(name)) {
- assertThat(inputStream.read(data), equalTo(length));
- assertThat(inputStream.read(), equalTo(-1));
- }
- return data;
- }
-
- protected byte[] randomBytes(int length) {
- byte[] data = new byte[length];
- for (int i = 0; i < data.length; i++) {
- data[i] = (byte) randomInt();
- }
- return data;
- }
-
- protected BlobStore newBlobStore() throws IOException {
- Path tempDir = createTempDir();
- Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
- FsBlobStore store = new FsBlobStore(settings, tempDir);
- return store;
- }
-}
diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java
new file mode 100644
index 0000000000..63c04b1c5e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.blobstore;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ESBlobStoreContainerTestCase;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
+ protected BlobStore newBlobStore() throws IOException {
+ Path tempDir = createTempDir();
+ Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
+ return new FsBlobStore(settings, tempDir);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java
new file mode 100644
index 0000000000..f6f53549ce
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.blobstore;
+
+import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ESBlobStoreTestCase;
+
+import java.io.IOException;
+import java.nio.file.Path;
+
+@LuceneTestCase.SuppressFileSystems("ExtrasFS")
+public class FsBlobStoreTests extends ESBlobStoreTestCase {
+ protected BlobStore newBlobStore() throws IOException {
+ Path tempDir = createTempDir();
+ Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
+ return new FsBlobStore(settings, tempDir);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java b/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java
new file mode 100644
index 0000000000..22a3ecac8e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/FsBlobStoreRepositoryIT.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.test.ESBlobStoreRepositoryIntegTestCase;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+
+public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase {
+ @Override
+ protected void createTestRepository(String name) {
+ assertAcked(client().admin().cluster().preparePutRepository(name)
+ .setType("fs").setSettings(Settings.settingsBuilder()
+ .put("location", randomRepoPath())
+ .put("compress", randomBoolean())
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
index 086aac209b..5ac1e82dba 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java
@@ -46,7 +46,7 @@ public class BlobStoreWrapper implements BlobStore {
}
@Override
- public void close() {
+ public void close() throws IOException {
delegate.close();
}
diff --git a/core/src/test/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java b/core/src/test/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java
new file mode 100644
index 0000000000..291d00a8dd
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ESBlobStoreContainerTestCase.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.bytes.BytesArray;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.test.ESBlobStoreTestCase.writeRandomBlob;
+import static org.elasticsearch.test.ESBlobStoreTestCase.randomBytes;
+import static org.elasticsearch.test.ESBlobStoreTestCase.readBlobFully;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.notNullValue;
+
+/**
+ * Generic test case for blob store container implementation.
+ * These tests check basic blob store functionality.
+ */
+public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
+
+ public void testWriteRead() throws IOException {
+ try(final BlobStore store = newBlobStore()) {
+ final BlobContainer container = store.blobContainer(new BlobPath());
+ byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+ container.writeBlob("foobar", new BytesArray(data));
+ try (InputStream stream = container.readBlob("foobar")) {
+ BytesRefBuilder target = new BytesRefBuilder();
+ while (target.length() < data.length) {
+ byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())];
+ int offset = scaledRandomIntBetween(0, buffer.length - 1);
+ int read = stream.read(buffer, offset, buffer.length - offset);
+ target.append(new BytesRef(buffer, offset, read));
+ }
+ assertEquals(data.length, target.length());
+ assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length()));
+ }
+ }
+ }
+
+ public void testMoveAndList() throws IOException {
+ try(final BlobStore store = newBlobStore()) {
+ final BlobContainer container = store.blobContainer(new BlobPath());
+ assertThat(container.listBlobs().size(), equalTo(0));
+ int numberOfFooBlobs = randomIntBetween(0, 10);
+ int numberOfBarBlobs = randomIntBetween(3, 20);
+ Map<String, Long> generatedBlobs = new HashMap<>();
+ for (int i = 0; i < numberOfFooBlobs; i++) {
+ int length = randomIntBetween(10, 100);
+ String name = "foo-" + i + "-";
+ generatedBlobs.put(name, (long) length);
+ writeRandomBlob(container, name, length);
+ }
+ for (int i = 1; i < numberOfBarBlobs; i++) {
+ int length = randomIntBetween(10, 100);
+ String name = "bar-" + i + "-";
+ generatedBlobs.put(name, (long) length);
+ writeRandomBlob(container, name, length);
+ }
+ int length = randomIntBetween(10, 100);
+ String name = "bar-0-";
+ generatedBlobs.put(name, (long) length);
+ byte[] data = writeRandomBlob(container, name, length);
+
+ Map<String, BlobMetaData> blobs = container.listBlobs();
+ assertThat(blobs.size(), equalTo(numberOfFooBlobs + numberOfBarBlobs));
+ for (Map.Entry<String, Long> generated : generatedBlobs.entrySet()) {
+ BlobMetaData blobMetaData = blobs.get(generated.getKey());
+ assertThat(generated.getKey(), blobMetaData, notNullValue());
+ assertThat(blobMetaData.name(), equalTo(generated.getKey()));
+ assertThat(blobMetaData.length(), equalTo(generated.getValue()));
+ }
+
+ assertThat(container.listBlobsByPrefix("foo-").size(), equalTo(numberOfFooBlobs));
+ assertThat(container.listBlobsByPrefix("bar-").size(), equalTo(numberOfBarBlobs));
+ assertThat(container.listBlobsByPrefix("baz-").size(), equalTo(0));
+
+ String newName = "bar-new";
+ // Move to a new location
+ container.move(name, newName);
+ assertThat(container.listBlobsByPrefix(name).size(), equalTo(0));
+ blobs = container.listBlobsByPrefix(newName);
+ assertThat(blobs.size(), equalTo(1));
+ assertThat(blobs.get(newName).length(), equalTo(generatedBlobs.get(name)));
+ assertThat(data, equalTo(readBlobFully(container, newName, length)));
+ }
+ }
+
+ protected abstract BlobStore newBlobStore() throws IOException;
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java b/core/src/test/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java
new file mode 100644
index 0000000000..dc49683de6
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ESBlobStoreRepositoryIntegTestCase.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+/**
+ * Basic integration tests for blob-based repository validation.
+ */
+public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase {
+
+ protected abstract void createTestRepository(String name);
+
+ public void testSnapshotAndRestore() throws Exception {
+ String repoName = randomAsciiName();
+ logger.info("--> creating repository {}", repoName);
+ createTestRepository(repoName);
+ int indexCount = randomIntBetween(1, 5);
+ int[] docCounts = new int[indexCount];
+ String[] indexNames = generateRandomNames(indexCount);
+ for (int i = 0; i < indexCount; i++) {
+ logger.info("--> create random index {} with {} records", indexNames[i], docCounts[i]);
+ docCounts[i] = iterations(10, 1000);
+ addRandomDocuments(indexNames[i], docCounts[i]);
+ assertHitCount(client().prepareSearch(indexNames[i]).setSize(0).get(), docCounts[i]);
+ }
+
+ String snapshotName = randomAsciiName();
+ logger.info("--> create snapshot {}:{}", repoName, snapshotName);
+ assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexNames));
+
+ List<String> deleteIndices = randomSubsetOf(randomIntBetween(0, indexCount), indexNames);
+ if (deleteIndices.size() > 0) {
+ logger.info("--> delete indices {}", deleteIndices);
+ assertAcked(client().admin().indices().prepareDelete(deleteIndices.toArray(new String[deleteIndices.size()])));
+ }
+
+ Set<String> closeIndices = new HashSet<>(Arrays.asList(indexNames));
+ closeIndices.removeAll(deleteIndices);
+
+ if (closeIndices.size() > 0) {
+ for (String index : closeIndices) {
+ if (randomBoolean()) {
+ logger.info("--> add random documents to {}", index);
+ addRandomDocuments(index, randomIntBetween(10, 1000));
+ } else {
+ int docCount = (int) client().prepareSearch(index).setSize(0).get().getHits().totalHits();
+ int deleteCount = randomIntBetween(1, docCount);
+ logger.info("--> delete {} random documents from {}", deleteCount, index);
+ for (int i = 0; i < deleteCount; i++) {
+ int doc = randomIntBetween(0, docCount - 1);
+ client().prepareDelete(index, index, Integer.toString(doc)).get();
+ }
+ client().admin().indices().prepareRefresh(index).get();
+ }
+ }
+
+ logger.info("--> close indices {}", closeIndices);
+ assertAcked(client().admin().indices().prepareClose(closeIndices.toArray(new String[closeIndices.size()])));
+ }
+
+ logger.info("--> restore all indices from the snapshot");
+ assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true));
+
+ ensureGreen();
+
+ for (int i = 0; i < indexCount; i++) {
+ assertHitCount(client().prepareSearch(indexNames[i]).setSize(0).get(), docCounts[i]);
+ }
+ }
+
+ public void testMultipleSnapshotAndRollback() throws Exception {
+ String repoName = randomAsciiName();
+ logger.info("--> creating repository {}", repoName);
+ createTestRepository(repoName);
+ int iterationCount = randomIntBetween(2, 5);
+ int[] docCounts = new int[iterationCount];
+ String indexName = randomAsciiName();
+ String snapshotName = randomAsciiName();
+ assertAcked(client().admin().indices().prepareCreate(indexName).get());
+ for (int i = 0; i < iterationCount; i++) {
+ if (randomBoolean() && i > 0) { // don't delete on the first iteration
+ int docCount = docCounts[i - 1];
+ if (docCount > 0) {
+ int deleteCount = randomIntBetween(1, docCount);
+ logger.info("--> delete {} random documents from {}", deleteCount, indexName);
+ for (int j = 0; j < deleteCount; j++) {
+ int doc = randomIntBetween(0, docCount - 1);
+ client().prepareDelete(indexName, indexName, Integer.toString(doc)).get();
+ }
+ client().admin().indices().prepareRefresh(indexName).get();
+ }
+ } else {
+ int docCount = randomIntBetween(10, 1000);
+ logger.info("--> add {} random documents to {}", docCount, indexName);
+ addRandomDocuments(indexName, docCount);
+ }
+ // Check number of documents in this iteration
+ docCounts[i] = (int) client().prepareSearch(indexName).setSize(0).get().getHits().totalHits();
+ logger.info("--> create snapshot {}:{} with {} documents", repoName, snapshotName + "-" + i, docCounts[i]);
+ assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName + "-" + i).setWaitForCompletion(true).setIndices(indexName));
+ }
+
+ int restoreOperations = randomIntBetween(1, 3);
+ for (int i = 0; i < restoreOperations; i++) {
+ int iterationToRestore = randomIntBetween(0, iterationCount - 1);
+ logger.info("--> performing restore of the iteration {}", iterationToRestore);
+
+ logger.info("--> close index");
+ assertAcked(client().admin().indices().prepareClose(indexName));
+
+ logger.info("--> restore index from the snapshot");
+ assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore).setWaitForCompletion(true));
+ ensureGreen();
+ assertHitCount(client().prepareSearch(indexName).setSize(0).get(), docCounts[iterationToRestore]);
+ }
+ }
+
+ protected void addRandomDocuments(String name, int numDocs) throws ExecutionException, InterruptedException {
+ IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ indexRequestBuilders[i] = client().prepareIndex(name, name, Integer.toString(i))
+ .setRouting(randomAsciiOfLength(randomIntBetween(1, 10))).setSource("field", "value");
+ }
+ indexRandom(true, indexRequestBuilders);
+ }
+
+ protected String[] generateRandomNames(int num) {
+ Set<String> names = new HashSet<>();
+ for (int i = 0; i < num; i++) {
+ String name;
+ do {
+ name = randomAsciiName();
+ } while (names.contains(name));
+ names.add(name);
+ }
+ return names.toArray(new String[num]);
+ }
+
+ public static CreateSnapshotResponse assertSuccessfulSnapshot(CreateSnapshotRequestBuilder requestBuilder) {
+ CreateSnapshotResponse response = requestBuilder.get();
+ assertSuccessfulSnapshot(response);
+ return response;
+ }
+
+ public static void assertSuccessfulSnapshot(CreateSnapshotResponse response) {
+ assertThat(response.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(response.getSnapshotInfo().successfulShards(), equalTo(response.getSnapshotInfo().totalShards()));
+ }
+
+ public static RestoreSnapshotResponse assertSuccessfulRestore(RestoreSnapshotRequestBuilder requestBuilder) {
+ RestoreSnapshotResponse response = requestBuilder.get();
+ assertSuccessfulRestore(response);
+ return response;
+ }
+
+ public static void assertSuccessfulRestore(RestoreSnapshotResponse response) {
+ assertThat(response.getRestoreInfo().successfulShards(), greaterThan(0));
+ assertThat(response.getRestoreInfo().successfulShards(), equalTo(response.getRestoreInfo().totalShards()));
+ }
+
+ public static String randomAsciiName() {
+ return randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
+ }
+}
diff --git a/core/src/test/java/org/elasticsearch/test/ESBlobStoreTestCase.java b/core/src/test/java/org/elasticsearch/test/ESBlobStoreTestCase.java
new file mode 100644
index 0000000000..80432d628e
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/test/ESBlobStoreTestCase.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Generic test case for blob store implementation.
+ * These tests check basic blob store functionality.
+ */
+public abstract class ESBlobStoreTestCase extends ESTestCase {
+
+ public void testContainerCreationAndDeletion() throws IOException {
+ try(final BlobStore store = newBlobStore()) {
+ final BlobContainer containerFoo = store.blobContainer(new BlobPath().add("foo"));
+ final BlobContainer containerBar = store.blobContainer(new BlobPath().add("bar"));
+ byte[] data1 = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+ byte[] data2 = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+ containerFoo.writeBlob("test", new BytesArray(data1));
+ containerBar.writeBlob("test", new BytesArray(data2));
+
+ assertArrayEquals(readBlobFully(containerFoo, "test", data1.length), data1);
+ assertArrayEquals(readBlobFully(containerBar, "test", data2.length), data2);
+
+ assertTrue(containerFoo.blobExists("test"));
+ assertTrue(containerBar.blobExists("test"));
+ store.delete(new BlobPath());
+ assertFalse(containerFoo.blobExists("test"));
+ assertFalse(containerBar.blobExists("test"));
+ }
+ }
+
+ public static byte[] writeRandomBlob(BlobContainer container, String name, int length) throws IOException {
+ byte[] data = randomBytes(length);
+ container.writeBlob(name, new BytesArray(data));
+ return data;
+ }
+
+ public static byte[] readBlobFully(BlobContainer container, String name, int length) throws IOException {
+ byte[] data = new byte[length];
+ try (InputStream inputStream = container.readBlob(name)) {
+ assertThat(inputStream.read(data), equalTo(length));
+ assertThat(inputStream.read(), equalTo(-1));
+ }
+ return data;
+ }
+
+ public static byte[] randomBytes(int length) {
+ byte[] data = new byte[length];
+ for (int i = 0; i < data.length; i++) {
+ data[i] = (byte) randomInt();
+ }
+ return data;
+ }
+
+ protected abstract BlobStore newBlobStore() throws IOException;
+}