author     Anu Engineer <aengineer@apache.org>   2018-05-10 17:08:26 -0700
committer  Anu Engineer <aengineer@apache.org>   2018-05-10 17:08:26 -0700
commit     30293f6065c9e5b41c07cd670c7a6a1768d1434b (patch)
tree       ca2cd6f016e197ee2a108b72a4284c8dfc388181 /hadoop-hdds/container-service/src/main/java/org/apache
parent     db1ab0fc1674177fdbe8f50c557aa4052ce77efc (diff)
HDDS-34. Remove .meta file during creation of container
Contributed by Bharat Viswanadham.
Diffstat (limited to 'hadoop-hdds/container-service/src/main/java/org/apache')
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java      | 27
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java     | 35
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java  | 42
3 files changed, 8 insertions, 96 deletions
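
In short: after this patch a container's on-disk state is the .container file alone; the companion .meta file (which carried a SHA-256 hash of the container file) is no longer created, verified at datanode startup, or deleted alongside the container. A rough sketch of what the write path in ContainerManagerImpl boils down to after the change, pieced together from the hunks below (fragment only; stream setup, error handling, and the receiver of getProtoBufMessage() are assumed):

    // Only the .container file is written now; no parallel .meta file.
    File containerFile = ContainerUtils.getContainerFile(containerData, location);
    FileOutputStream containerStream = new FileOutputStream(containerFile);
    MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    DigestOutputStream dos = new DigestOutputStream(containerStream, sha);
    // Serialize the ContainerData protobuf straight into the container file.
    containerData.getProtoBufMessage().writeDelimitedTo(dos);
    // The digest stream is still in place, but the hash is no longer
    // persisted into a ContainerProtos.ContainerMeta message.
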
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index 799cca3113..947dc7d463 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -18,14 +18,12 @@
package org.apache.hadoop.ozone.container.common.helpers;
-import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerLifeCycleState;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.util.Time;
import java.io.IOException;
import java.util.Collections;
@@ -45,7 +43,6 @@ public class ContainerData {
private String dbPath; // Path to Level DB Store.
// Path to Physical file system where container and checksum are stored.
private String containerFilePath;
- private String hash;
private AtomicLong bytesUsed;
private long maxSize;
private long containerID;
@@ -95,10 +92,6 @@ public class ContainerData {
data.setState(protoData.getState());
}
- if(protoData.hasHash()) {
- data.setHash(protoData.getHash());
- }
-
if (protoData.hasBytesUsed()) {
data.setBytesUsed(protoData.getBytesUsed());
}
@@ -123,10 +116,6 @@ public class ContainerData {
builder.setDbPath(this.getDBPath());
}
- if (this.getHash() != null) {
- builder.setHash(this.getHash());
- }
-
if (this.getContainerPath() != null) {
builder.setContainerPath(this.getContainerPath());
}
@@ -274,22 +263,6 @@ public class ContainerData {
// TODO: closed or closing here
setState(ContainerLifeCycleState.CLOSED);
- // Some thing brain dead for now. name + Time stamp of when we get the close
- // container message.
- setHash(DigestUtils.sha256Hex(this.getContainerID() +
- Long.toString(Time.monotonicNow())));
- }
-
- /**
- * Final hash for this container.
- * @return - Hash
- */
- public String getHash() {
- return hash;
- }
-
- public void setHash(String hash) {
- this.hash = hash;
}
public void setMaxSize(long maxSize) {
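
With the hash field and its accessors gone, closing a container in ContainerData reduces to a state transition. A hypothetical reconstruction of the method from the hunk above (method name and modifiers are assumed, since the diff only shows the body):

    public synchronized void closeContainer() {
      // TODO: closed or closing here
      setState(ContainerLifeCycleState.CLOSED);
      // No final SHA-256 hash is computed for the container any more.
    }
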
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index e24435418d..959d88c027 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -47,7 +47,7 @@ import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
.UNABLE_TO_FIND_DATA_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
+
/**
* A set of helper functions to create proper responses.
@@ -194,10 +194,9 @@ public final class ContainerUtils {
* Verifies that this in indeed a new container.
*
* @param containerFile - Container File to verify
- * @param metadataFile - metadata File to verify
* @throws IOException
*/
- public static void verifyIsNewContainer(File containerFile, File metadataFile)
+ public static void verifyIsNewContainer(File containerFile)
throws IOException {
Logger log = LoggerFactory.getLogger(ContainerManagerImpl.class);
if (containerFile.exists()) {
@@ -207,13 +206,6 @@ public final class ContainerUtils {
"disk.");
}
- if (metadataFile.exists()) {
- log.error("metadata found on disk, but missing container. Refusing to" +
- " write this container. File: {} ", metadataFile.toPath());
- throw new FileAlreadyExistsException(("metadata found on disk, but " +
- "missing container. Refusing to write this container."));
- }
-
File parentPath = new File(containerFile.getParent());
if (!parentPath.exists() && !parentPath.mkdirs()) {
@@ -228,11 +220,6 @@ public final class ContainerUtils {
throw new IOException("creation of a new container file failed.");
}
- if (!metadataFile.createNewFile()) {
- log.error("creation of the metadata file failed. File: {}",
- metadataFile.toPath());
- throw new IOException("creation of a new container file failed.");
- }
}
public static String getContainerDbFileName(String containerName) {
@@ -287,20 +274,6 @@ public final class ContainerUtils {
}
/**
- * Returns Metadata location.
- *
- * @param containerData - Data
- * @param location - Path
- * @return Path
- */
- public static File getMetadataFile(ContainerData containerData,
- Path location) {
- return location.resolve(Long.toString(containerData
- .getContainerID()).concat(CONTAINER_META))
- .toFile();
- }
-
- /**
* Returns container file location.
*
* @param containerData - Data
@@ -395,10 +368,10 @@ public final class ContainerUtils {
String rootPath = getContainerNameFromFile(new File(containerData
.getContainerPath()));
Path containerPath = Paths.get(rootPath.concat(CONTAINER_EXTENSION));
- Path metaPath = Paths.get(rootPath.concat(CONTAINER_META));
+
FileUtils.forceDelete(containerPath.toFile());
- FileUtils.forceDelete(metaPath.toFile());
+
}
/**
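
The visible effect for callers of ContainerUtils is the narrowed signature: verifyIsNewContainer now takes only the container file, and getMetadataFile is gone entirely. A caller-side sketch matching the ContainerManagerImpl hunk further down:

    File containerFile = ContainerUtils.getContainerFile(containerData, location);
    // was: ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
    ContainerUtils.verifyIsNewContainer(containerFile);
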
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 1893b3b3b8..cb603347ac 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.container.common.impl;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
-import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -101,7 +100,6 @@ import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result
.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
/**
* A Generic ContainerManagerImpl that will be called from Ozone
@@ -233,18 +231,11 @@ public class ContainerManagerImpl implements ContainerManager {
long containerID = Long.parseLong(keyName);
try {
String containerFileName = containerName.concat(CONTAINER_EXTENSION);
- String metaFileName = containerName.concat(CONTAINER_META);
containerStream = new FileInputStream(containerFileName);
- metaStream = new FileInputStream(metaFileName);
-
- MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-
- dis = new DigestInputStream(containerStream, sha);
-
ContainerProtos.ContainerData containerDataProto =
- ContainerProtos.ContainerData.parseDelimitedFrom(dis);
+ ContainerProtos.ContainerData.parseDelimitedFrom(containerStream);
ContainerData containerData;
if (containerDataProto == null) {
// Sometimes container metadata might have been created but empty,
@@ -255,19 +246,6 @@ public class ContainerManagerImpl implements ContainerManager {
return;
}
containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
- ContainerProtos.ContainerMeta meta =
- ContainerProtos.ContainerMeta.parseDelimitedFrom(metaStream);
- if (meta != null && !DigestUtils.sha256Hex(sha.digest())
- .equals(meta.getHash())) {
- // This means we were not able read data from the disk when booted the
- // datanode. We are going to rely on SCM understanding that we don't
- // have valid data for this container when we send container reports.
- // Hopefully SCM will ask us to delete this container and rebuild it.
- LOG.error("Invalid SHA found for container data. Name :{}"
- + "cowardly refusing to read invalid data", containerName);
- containerMap.put(containerID, new ContainerStatus(null));
- return;
- }
ContainerStatus containerStatus = new ContainerStatus(containerData);
// Initialize pending deletion blocks count in in-memory
@@ -298,7 +276,7 @@ public class ContainerManagerImpl implements ContainerManager {
containerStatus.setBytesUsed(bytesUsed);
containerMap.put(containerID, containerStatus);
- } catch (IOException | NoSuchAlgorithmException ex) {
+ } catch (IOException ex) {
LOG.error("read failed for file: {} ex: {}", containerName,
ex.getMessage());
@@ -398,12 +376,10 @@ public class ContainerManagerImpl implements ContainerManager {
File containerFile = ContainerUtils.getContainerFile(containerData,
location);
- File metadataFile = ContainerUtils.getMetadataFile(containerData,
- location);
String containerName = Long.toString(containerData.getContainerID());
if(!overwrite) {
- ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
+ ContainerUtils.verifyIsNewContainer(containerFile);
metadataPath = this.locationManager.getDataPath(containerName);
metadataPath = ContainerUtils.createMetadata(metadataPath,
containerName, conf);
@@ -412,7 +388,7 @@ public class ContainerManagerImpl implements ContainerManager {
}
containerStream = new FileOutputStream(containerFile);
- metaStream = new FileOutputStream(metadataFile);
+
MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
dos = new DigestOutputStream(containerStream, sha);
@@ -425,13 +401,6 @@ public class ContainerManagerImpl implements ContainerManager {
.getProtoBufMessage();
protoData.writeDelimitedTo(dos);
- ContainerProtos.ContainerMeta protoMeta = ContainerProtos
- .ContainerMeta.newBuilder()
- .setFileName(containerFile.toString())
- .setHash(DigestUtils.sha256Hex(sha.digest()))
- .build();
- protoMeta.writeDelimitedTo(metaStream);
-
} catch (IOException ex) {
// TODO : we need to clean up partially constructed files
// The proper way to do would be for a thread
@@ -913,9 +882,6 @@ public class ContainerManagerImpl implements ContainerManager {
.setWriteBytes(container.getWriteBytes())
.setContainerID(container.getContainer().getContainerID());
- if (container.getContainer().getHash() != null) {
- ciBuilder.setFinalhash(container.getContainer().getHash());
- }
crBuilder.addReports(ciBuilder.build());
}
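
Reading a container back at datanode startup is correspondingly simpler: the delimited ContainerData protobuf is parsed straight from the .container file stream, with no DigestInputStream and no comparison against a ContainerMeta hash, and container reports no longer carry a final hash. A minimal sketch of the resulting read path, assembled from the ContainerManagerImpl hunks above (fragment only; try-with-resources is used here for brevity, whereas the original closes the stream in a finally block):

    String containerFileName = containerName.concat(CONTAINER_EXTENSION);
    try (FileInputStream containerStream = new FileInputStream(containerFileName)) {
      ContainerProtos.ContainerData containerDataProto =
          ContainerProtos.ContainerData.parseDelimitedFrom(containerStream);
      if (containerDataProto == null) {
        // Container file exists but is empty or partially written; record a
        // null status so SCM can later ask this datanode to rebuild it.
        containerMap.put(containerID, new ContainerStatus(null));
        return;
      }
      ContainerData containerData =
          ContainerData.getFromProtBuf(containerDataProto, conf);
      containerMap.put(containerID, new ContainerStatus(containerData));
    }
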