author     gfyoung <gfyoung17@gmail.com>    2016-06-30 23:00:10 -0400
committer  Ali Beyad <ali@elastic.co>       2016-06-30 23:00:10 -0400
commit     d24cc65cad2f3152237df8b6c457a2d0a603f13a
tree       cabec4a52ec125b0e9ddebea63b83d865b997afb /plugins/repository-hdfs
parent     a24d302e73251ae4fd2d41abff1fa2cab419be66
Raised IOException on deleteBlob (#18815)
Raise IOException on deleteBlob if the blob doesn't exist

This commit raises an IOException on BlobContainer#deleteBlob if the blob does not exist, in conformance with the BlobContainer interface contract. Each implementation of BlobContainer now conforms to this contract (file system, S3, Azure, HDFS). This commit also contains blob container tests for each of the repository implementations.

Closes #18530
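The practical effect of the contract change is that callers can no longer treat deleteBlob as a silent no-op when the blob is missing. A minimal caller-side sketch of the adjustment (not part of the commit; the helper class and method name are hypothetical, while blobExists and deleteBlob are the BlobContainer methods used in the diff below):

import java.io.IOException;
import org.elasticsearch.common.blobstore.BlobContainer;

// Hypothetical helper: reproduces the old Files.deleteIfExists-style behaviour
// on top of the new contract, where deleteBlob throws for a missing blob.
final class BlobDeletes {
    static boolean deleteIfExists(BlobContainer container, String blobName) throws IOException {
        if (container.blobExists(blobName) == false) {
            return false;                   // nothing to delete, and no exception
        }
        container.deleteBlob(blobName);     // still throws IOException on other failures
        return true;
    }
}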
Diffstat (limited to 'plugins/repository-hdfs')
-rw-r--r--  plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java            |  18
-rw-r--r--  plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java  | 104
2 files changed, 113 insertions, 9 deletions
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
index 6ba726e2b2..c8b3d9f7e1 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
@@ -68,16 +68,16 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
@Override
public void deleteBlob(String blobName) throws IOException {
- try {
- store.execute(new Operation<Boolean>() {
- @Override
- public Boolean run(FileContext fileContext) throws IOException {
- return fileContext.delete(new Path(path, blobName), true);
- }
- });
- } catch (FileNotFoundException ok) {
- // behaves like Files.deleteIfExists
+ if (!blobExists(blobName)) {
+ throw new IOException("Blob [" + blobName + "] does not exist");
}
+
+ store.execute(new Operation<Boolean>() {
+ @Override
+ public Boolean run(FileContext fileContext) throws IOException {
+ return fileContext.delete(new Path(path, blobName), true);
+ }
+ });
}
@Override
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java
new file mode 100644
index 0000000000..a96a8183e5
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreContainerTests.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AbstractFileSystem;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
+
+import javax.security.auth.Subject;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.security.AccessController;
+import java.security.Principal;
+import java.security.PrivilegedAction;
+import java.util.Collections;
+
+public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
+
+ @Override
+ protected BlobStore newBlobStore() throws IOException {
+ return AccessController.doPrivileged(
+ new PrivilegedAction<HdfsBlobStore>() {
+ @Override
+ public HdfsBlobStore run() {
+ try {
+ FileContext fileContext = createContext(new URI("hdfs:///"));
+ return new HdfsBlobStore(fileContext, "temp", 1024);
+ } catch (IOException | URISyntaxException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ });
+ }
+
+ public FileContext createContext(URI uri) {
+ // mirrors HdfsRepository.java behaviour
+ Configuration cfg = new Configuration(true);
+ cfg.setClassLoader(HdfsRepository.class.getClassLoader());
+ cfg.reloadConfiguration();
+
+ Constructor<?> ctor;
+ Subject subject;
+
+ try {
+ Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
+ ctor = clazz.getConstructor(String.class);
+ ctor.setAccessible(true);
+ } catch (ClassNotFoundException | NoSuchMethodException e) {
+ throw new RuntimeException(e);
+ }
+
+ try {
+ Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name"));
+ subject = new Subject(false, Collections.singleton(principal),
+ Collections.emptySet(), Collections.emptySet());
+ } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
+ throw new RuntimeException(e);
+ }
+
+ // disable file system cache
+ cfg.setBoolean("fs.hdfs.impl.disable.cache", true);
+
+ // set file system to TestingFs to avoid a bunch of security
+ // checks, similar to what is done in HdfsTests.java
+ cfg.set(String.format("fs.AbstractFileSystem.%s.impl", uri.getScheme()),
+ TestingFs.class.getName());
+
+ // create the FileContext with our user
+ return Subject.doAs(subject, new PrivilegedAction<FileContext>() {
+ @Override
+ public FileContext run() {
+ try {
+ TestingFs fs = (TestingFs) AbstractFileSystem.get(uri, cfg);
+ return FileContext.getFileContext(fs, cfg);
+ } catch (UnsupportedFileSystemException e) {
+ throw new RuntimeException(e);
+ }
+ }
+ });
+ }
+}
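The new test class inherits its coverage from ESBlobStoreContainerTestCase, so the deleteBlob contract itself is not asserted here. A hypothetical companion test, in the style of the class above, might look like the following sketch (not part of the commit; BlobStore#blobContainer and BlobPath.cleanPath are assumed from the blob store API, and newBlobStore is the method defined above):

// Hypothetical sketch: exercising the new deleteBlob contract directly.
// Would additionally import org.elasticsearch.common.blobstore.BlobContainer
// and org.elasticsearch.common.blobstore.BlobPath.
public void testDeleteMissingBlobThrows() throws IOException {
    try (BlobStore store = newBlobStore()) {
        BlobContainer container = store.blobContainer(BlobPath.cleanPath());   // assumed API
        try {
            container.deleteBlob("no_such_blob");
            fail("expected an IOException when deleting a blob that does not exist");
        } catch (IOException expected) {
            // new contract: deleting a missing blob is an error
        }
    }
}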