summaryrefslogtreecommitdiff
path: root/hadoop-hdds/container-service/src/main/java/org/apache
diff options
context:
space:
mode:
authorAnu Engineer <aengineer@apache.org>2018-05-07 14:42:18 -0700
committerAnu Engineer <aengineer@apache.org>2018-05-07 14:58:52 -0700
commit3a43ac2851f5dea4deb8a1dfebf9bf65fc57bd76 (patch)
tree90f57883dda458ccd87c2c9e1f9c8530aa6c5316 /hadoop-hdds/container-service/src/main/java/org/apache
parenta3a1552c33d5650fbd0a702369fccd21b8c9d3e2 (diff)
HDDS-1. Remove SCM Block DB. Contributed by Xiaoyu Yao.
Diffstat (limited to 'hadoop-hdds/container-service/src/main/java/org/apache')
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java20
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java33
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java19
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java14
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java19
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java3
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java6
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java76
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java210
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java121
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java66
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java4
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java4
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java19
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java2
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java76
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java17
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java8
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java9
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java9
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java2
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java21
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java36
-rw-r--r--hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java14
24 files changed, 379 insertions, 429 deletions
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
index 68bf4421f6..8c5609d63c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java
@@ -21,7 +21,6 @@ import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import org.apache.commons.codec.binary.Hex;
import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
@@ -105,18 +104,17 @@ public final class ChunkUtils {
* Validates chunk data and returns a file object to Chunk File that we are
* expected to write data to.
*
- * @param pipeline - pipeline.
* @param data - container data.
* @param info - chunk info.
* @return File
* @throws StorageContainerException
*/
- public static File validateChunk(Pipeline pipeline, ContainerData data,
+ public static File validateChunk(ContainerData data,
ChunkInfo info) throws StorageContainerException {
Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
- File chunkFile = getChunkFile(pipeline, data, info);
+ File chunkFile = getChunkFile(data, info);
if (ChunkUtils.isOverWriteRequested(chunkFile, info)) {
if (!ChunkUtils.isOverWritePermitted(info)) {
log.error("Rejecting write chunk request. Chunk overwrite " +
@@ -132,21 +130,21 @@ public final class ChunkUtils {
/**
* Validates that Path to chunk file exists.
*
- * @param pipeline - Container Info.
* @param data - Container Data
* @param info - Chunk info
* @return - File.
* @throws StorageContainerException
*/
- public static File getChunkFile(Pipeline pipeline, ContainerData data,
+ public static File getChunkFile(ContainerData data,
ChunkInfo info) throws StorageContainerException {
+ Preconditions.checkNotNull(data, "Container data can't be null");
Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class);
- if (data == null) {
- log.error("Invalid container Name: {}", pipeline.getContainerName());
- throw new StorageContainerException("Unable to find the container Name:" +
+ if (data.getContainerID() < 0) {
+ log.error("Invalid container id: {}", data.getContainerID());
+ throw new StorageContainerException("Unable to find the container id:" +
" " +
- pipeline.getContainerName(), CONTAINER_NOT_FOUND);
+ data.getContainerID(), CONTAINER_NOT_FOUND);
}
File dataDir = ContainerUtils.getDataDirectory(data).toFile();
@@ -335,7 +333,7 @@ public final class ChunkUtils {
ContainerProtos.ReadChunkResponseProto.newBuilder();
response.setChunkData(info.getProtoBufMessage());
response.setData(ByteString.copyFrom(data));
- response.setPipeline(msg.getReadChunk().getPipeline());
+ response.setBlockID(msg.getReadChunk().getBlockID());
ContainerProtos.ContainerCommandResponseProto.Builder builder =
ContainerUtils.getContainerResponse(msg, ContainerProtos.Result
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
index c29374c07c..c20282adaf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java
@@ -40,7 +40,6 @@ import java.util.concurrent.atomic.AtomicLong;
*/
public class ContainerData {
- private final String containerName;
private final Map<String, String> metadata;
private String dbPath; // Path to Level DB Store.
// Path to Physical file system where container and checksum are stored.
@@ -48,18 +47,18 @@ public class ContainerData {
private String hash;
private AtomicLong bytesUsed;
private long maxSize;
- private Long containerID;
+ private long containerID;
private HddsProtos.LifeCycleState state;
/**
* Constructs a ContainerData Object.
*
- * @param containerName - Name
+ * @param containerID - ID
+ * @param conf - Configuration
*/
- public ContainerData(String containerName, Long containerID,
+ public ContainerData(long containerID,
Configuration conf) {
this.metadata = new TreeMap<>();
- this.containerName = containerName;
this.maxSize = conf.getLong(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY,
ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB;
this.bytesUsed = new AtomicLong(0L);
@@ -76,7 +75,7 @@ public class ContainerData {
public static ContainerData getFromProtBuf(
ContainerProtos.ContainerData protoData, Configuration conf)
throws IOException {
- ContainerData data = new ContainerData(protoData.getName(),
+ ContainerData data = new ContainerData(
protoData.getContainerID(), conf);
for (int x = 0; x < protoData.getMetadataCount(); x++) {
data.addMetadata(protoData.getMetadata(x).getKey(),
@@ -117,7 +116,6 @@ public class ContainerData {
public ContainerProtos.ContainerData getProtoBufMessage() {
ContainerProtos.ContainerData.Builder builder = ContainerProtos
.ContainerData.newBuilder();
- builder.setName(this.getContainerName());
builder.setContainerID(this.getContainerID());
if (this.getDBPath() != null) {
@@ -157,15 +155,6 @@ public class ContainerData {
}
/**
- * Returns the name of the container.
- *
- * @return - name
- */
- public String getContainerName() {
- return containerName;
- }
-
- /**
* Adds metadata.
*/
public void addMetadata(String key, String value) throws IOException {
@@ -231,9 +220,11 @@ public class ContainerData {
*
* @return String Name.
*/
- public String getName() {
- return getContainerName();
- }
+ // TODO: check the ContainerCache class to see if we are using the ContainerID instead.
+ /*
+ public String getName() {
+ return getContainerID();
+ }*/
/**
* Get container file path.
@@ -255,7 +246,7 @@ public class ContainerData {
* Get container ID.
* @return - container ID.
*/
- public synchronized Long getContainerID() {
+ public synchronized long getContainerID() {
return containerID;
}
@@ -284,7 +275,7 @@ public class ContainerData {
// Some thing brain dead for now. name + Time stamp of when we get the close
// container message.
- setHash(DigestUtils.sha256Hex(this.getContainerName() +
+ setHash(DigestUtils.sha256Hex(this.getContainerID() +
Long.toString(Time.monotonicNow())));
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
index 50d2da3975..19634f48b8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
*/
public class ContainerReport {
private static final int UNKNOWN = -1;
- private final String containerName;
private final String finalhash;
private long size;
private long keyCount;
@@ -51,11 +50,11 @@ public class ContainerReport {
/**
* Constructs the ContainerReport.
*
- * @param containerName - Container Name.
+ * @param containerID - Container ID.
* @param finalhash - Final Hash.
*/
- public ContainerReport(String containerName, String finalhash) {
- this.containerName = containerName;
+ public ContainerReport(long containerID, String finalhash) {
+ this.containerID = containerID;
this.finalhash = finalhash;
this.size = UNKNOWN;
this.keyCount = UNKNOWN;
@@ -74,7 +73,7 @@ public class ContainerReport {
*/
public static ContainerReport getFromProtoBuf(ContainerInfo info) {
Preconditions.checkNotNull(info);
- ContainerReport report = new ContainerReport(info.getContainerName(),
+ ContainerReport report = new ContainerReport(info.getContainerID(),
info.getFinalhash());
if (info.hasSize()) {
report.setSize(info.getSize());
@@ -103,15 +102,6 @@ public class ContainerReport {
}
/**
- * Gets the container name.
- *
- * @return - Name
- */
- public String getContainerName() {
- return containerName;
- }
-
- /**
* Returns the final signature for this container.
*
* @return - hash
@@ -203,7 +193,6 @@ public class ContainerReport {
*/
public ContainerInfo getProtoBufMessage() {
return ContainerInfo.newBuilder()
- .setContainerName(this.getContainerName())
.setKeyCount(this.getKeyCount())
.setSize(this.getSize())
.setUsed(this.getBytesUsed())
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
index 1818188cb6..e24435418d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java
@@ -184,6 +184,12 @@ public final class ContainerUtils {
removeExtension(containerFile.getName())).toString();
}
+ public static long getContainerIDFromFile(File containerFile) {
+ Preconditions.checkNotNull(containerFile);
+ String containerID = getContainerNameFromFile(containerFile);
+ return Long.parseLong(containerID);
+ }
+
/**
* Verifies that this in indeed a new container.
*
@@ -289,8 +295,8 @@ public final class ContainerUtils {
*/
public static File getMetadataFile(ContainerData containerData,
Path location) {
- return location.resolve(containerData
- .getContainerName().concat(CONTAINER_META))
+ return location.resolve(Long.toString(containerData
+ .getContainerID()).concat(CONTAINER_META))
.toFile();
}
@@ -303,8 +309,8 @@ public final class ContainerUtils {
*/
public static File getContainerFile(ContainerData containerData,
Path location) {
- return location.resolve(containerData
- .getContainerName().concat(CONTAINER_EXTENSION))
+ return location.resolve(Long.toString(containerData
+ .getContainerID()).concat(CONTAINER_EXTENSION))
.toFile();
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
index ade162a263..9d0ec957f2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.helpers;
import com.google.common.collect.Maps;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.util.StringUtils;
import java.util.List;
import java.util.Map;
@@ -37,7 +38,7 @@ public final class DeletedContainerBlocksSummary {
// value : the number of blocks need to be deleted in this container
// if the message contains multiple entries for same block,
// blocks will be merged
- private final Map<String, Integer> blockSummary;
+ private final Map<Long, Integer> blockSummary;
// total number of blocks in this message
private int numOfBlocks;
@@ -47,14 +48,14 @@ public final class DeletedContainerBlocksSummary {
blockSummary = Maps.newHashMap();
blocks.forEach(entry -> {
txSummary.put(entry.getTxID(), entry.getCount());
- if (blockSummary.containsKey(entry.getContainerName())) {
- blockSummary.put(entry.getContainerName(),
- blockSummary.get(entry.getContainerName())
- + entry.getBlockIDCount());
+ if (blockSummary.containsKey(entry.getContainerID())) {
+ blockSummary.put(entry.getContainerID(),
+ blockSummary.get(entry.getContainerID())
+ + entry.getLocalIDCount());
} else {
- blockSummary.put(entry.getContainerName(), entry.getBlockIDCount());
+ blockSummary.put(entry.getContainerID(), entry.getLocalIDCount());
}
- numOfBlocks += entry.getBlockIDCount();
+ numOfBlocks += entry.getLocalIDCount();
});
}
@@ -93,9 +94,9 @@ public final class DeletedContainerBlocksSummary {
.append("TimesProceed=")
.append(blks.getCount())
.append(", ")
- .append(blks.getContainerName())
+ .append(blks.getContainerID())
.append(" : [")
- .append(String.join(",", blks.getBlockIDList())).append("]")
+ .append(StringUtils.join(',', blks.getLocalIDList())).append("]")
.append("\n");
}
return sb.toString();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
index 566db02510..ec274525e4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java
@@ -65,7 +65,8 @@ public final class FileUtils {
ContainerProtos.ReadChunkResponseProto.newBuilder();
readChunkresponse.setChunkData(info.getProtoBufMessage());
readChunkresponse.setData(ByteString.copyFrom(data));
- readChunkresponse.setPipeline(msg.getGetSmallFile().getKey().getPipeline());
+ readChunkresponse.setBlockID(msg.getGetSmallFile().getKey().
+ getKeyData().getBlockID());
ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
ContainerProtos.GetSmallFileResponseProto.newBuilder();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
index 33eb911d4e..dbd5772d32 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java
@@ -63,11 +63,11 @@ public final class KeyUtils {
ContainerCache cache = ContainerCache.getInstance(conf);
Preconditions.checkNotNull(cache);
try {
- return cache.getDB(container.getContainerName(), container.getDBPath());
+ return cache.getDB(container.getContainerID(), container.getDBPath());
} catch (IOException ex) {
String message =
String.format("Unable to open DB. DB Name: %s, Path: %s. ex: %s",
- container.getContainerName(), container.getDBPath(), ex.getMessage());
+ container.getContainerID(), container.getDBPath(), ex.getMessage());
throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB);
}
}
@@ -83,7 +83,7 @@ public final class KeyUtils {
Preconditions.checkNotNull(container);
ContainerCache cache = ContainerCache.getInstance(conf);
Preconditions.checkNotNull(cache);
- cache.removeDB(container.getContainerName());
+ cache.removeDB(container.getContainerID());
}
/**
* Shutdown all DB Handles.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
index 457c417b10..350519692c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java
@@ -19,11 +19,11 @@ package org.apache.hadoop.ozone.container.common.impl;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
@@ -66,13 +66,12 @@ public class ChunkManagerImpl implements ChunkManager {
/**
* writes a given chunk.
*
- * @param pipeline - Name and the set of machines that make this container.
- * @param keyName - Name of the Key.
+ * @param blockID - ID of the block.
* @param info - ChunkInfo.
* @throws StorageContainerException
*/
@Override
- public void writeChunk(Pipeline pipeline, String keyName, ChunkInfo info,
+ public void writeChunk(BlockID blockID, ChunkInfo info,
byte[] data, ContainerProtos.Stage stage)
throws StorageContainerException {
// we don't want container manager to go away while we are writing chunks.
@@ -80,13 +79,13 @@ public class ChunkManagerImpl implements ChunkManager {
// TODO : Take keyManager Write lock here.
try {
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- String containerName = pipeline.getContainerName();
- Preconditions.checkNotNull(containerName,
- "Container name cannot be null");
+ Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
+ long containerID = blockID.getContainerID();
+ Preconditions.checkState(containerID >= 0,
+ "Container ID cannot be negative");
ContainerData container =
- containerManager.readContainer(containerName);
- File chunkFile = ChunkUtils.validateChunk(pipeline, container, info);
+ containerManager.readContainer(containerID);
+ File chunkFile = ChunkUtils.validateChunk(container, info);
File tmpChunkFile = getTmpChunkFile(chunkFile, info);
LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file",
@@ -96,16 +95,16 @@ public class ChunkManagerImpl implements ChunkManager {
ChunkUtils.writeData(tmpChunkFile, info, data);
break;
case COMMIT_DATA:
- commitChunk(tmpChunkFile, chunkFile, containerName, info.getLen());
+ commitChunk(tmpChunkFile, chunkFile, containerID, info.getLen());
break;
case COMBINED:
// directly write to the chunk file
long oldSize = chunkFile.length();
ChunkUtils.writeData(chunkFile, info, data);
long newSize = chunkFile.length();
- containerManager.incrBytesUsed(containerName, newSize - oldSize);
- containerManager.incrWriteCount(containerName);
- containerManager.incrWriteBytes(containerName, info.getLen());
+ containerManager.incrBytesUsed(containerID, newSize - oldSize);
+ containerManager.incrWriteCount(containerID);
+ containerManager.incrWriteBytes(containerID, info.getLen());
break;
default:
throw new IOException("Can not identify write operation.");
@@ -136,22 +135,21 @@ public class ChunkManagerImpl implements ChunkManager {
// Commit the chunk by renaming the temporary chunk file to chunk file
private void commitChunk(File tmpChunkFile, File chunkFile,
- String containerName, long chunkLen) throws IOException {
+ long containerID, long chunkLen) throws IOException {
long sizeDiff = tmpChunkFile.length() - chunkFile.length();
// It is safe to replace here as the earlier chunk if existing should be
// caught as part of validateChunk
Files.move(tmpChunkFile.toPath(), chunkFile.toPath(),
StandardCopyOption.REPLACE_EXISTING);
- containerManager.incrBytesUsed(containerName, sizeDiff);
- containerManager.incrWriteCount(containerName);
- containerManager.incrWriteBytes(containerName, chunkLen);
+ containerManager.incrBytesUsed(containerID, sizeDiff);
+ containerManager.incrWriteCount(containerID);
+ containerManager.incrWriteBytes(containerID, chunkLen);
}
/**
* reads the data defined by a chunk.
*
- * @param pipeline - container pipeline.
- * @param keyName - Name of the Key
+ * @param blockID - ID of the block.
* @param info - ChunkInfo.
* @return byte array
* @throws StorageContainerException
@@ -159,20 +157,20 @@ public class ChunkManagerImpl implements ChunkManager {
* TODO: Explore if we need to do that for ozone.
*/
@Override
- public byte[] readChunk(Pipeline pipeline, String keyName, ChunkInfo info)
+ public byte[] readChunk(BlockID blockID, ChunkInfo info)
throws StorageContainerException {
containerManager.readLock();
try {
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- String containerName = pipeline.getContainerName();
- Preconditions.checkNotNull(containerName,
- "Container name cannot be null");
+ Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
+ long containerID = blockID.getContainerID();
+ Preconditions.checkState(containerID >= 0,
+ "Container ID cannot be negative");
ContainerData container =
- containerManager.readContainer(containerName);
- File chunkFile = ChunkUtils.getChunkFile(pipeline, container, info);
+ containerManager.readContainer(containerID);
+ File chunkFile = ChunkUtils.getChunkFile(container, info);
ByteBuffer data = ChunkUtils.readData(chunkFile, info);
- containerManager.incrReadCount(containerName);
- containerManager.incrReadBytes(containerName, chunkFile.length());
+ containerManager.incrReadCount(containerID);
+ containerManager.incrReadBytes(containerID, chunkFile.length());
return data.array();
} catch (ExecutionException | NoSuchAlgorithmException e) {
LOG.error("read data failed. error: {}", e);
@@ -191,25 +189,25 @@ public class ChunkManagerImpl implements ChunkManager {
/**
* Deletes a given chunk.
*
- * @param pipeline - Pipeline.
- * @param keyName - Key Name
+ * @param blockID - ID of the block.
* @param info - Chunk Info
* @throws StorageContainerException
*/
@Override
- public void deleteChunk(Pipeline pipeline, String keyName, ChunkInfo info)
+ public void deleteChunk(BlockID blockID, ChunkInfo info)
throws StorageContainerException {
containerManager.readLock();
try {
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- String containerName = pipeline.getContainerName();
- Preconditions.checkNotNull(containerName,
- "Container name cannot be null");
- File chunkFile = ChunkUtils.getChunkFile(pipeline, containerManager
- .readContainer(containerName), info);
+ Preconditions.checkNotNull(blockID, "Block ID cannot be null.");
+ long containerID = blockID.getContainerID();
+ Preconditions.checkState(containerID >= 0,
+ "Container ID cannot be negative");
+
+ File chunkFile = ChunkUtils.getChunkFile(containerManager
+ .readContainer(containerID), info);
if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) {
FileUtil.fullyDelete(chunkFile);
- containerManager.decrBytesUsed(containerName, chunkFile.length());
+ containerManager.decrBytesUsed(containerID, chunkFile.length());
} else {
LOG.error("Not Supported Operation. Trying to delete a " +
"chunk that is in shared file. chunk info : " + info.toString());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 5e7375cd9d..1893b3b3b8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -24,7 +24,6 @@ import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -113,7 +112,9 @@ public class ContainerManagerImpl implements ContainerManager {
static final Logger LOG =
LoggerFactory.getLogger(ContainerManagerImpl.class);
- private final ConcurrentSkipListMap<String, ContainerStatus>
+ // TODO: consider primitive collection like eclipse-collections
+ // to avoid autoboxing overhead
+ private final ConcurrentSkipListMap<Long, ContainerStatus>
containerMap = new ConcurrentSkipListMap<>();
// Use a non-fair RW lock for better throughput, we may revisit this decision
@@ -229,6 +230,7 @@ public class ContainerManagerImpl implements ContainerManager {
Preconditions.checkNotNull(keyName,
"Container Name to container key mapping is null");
+ long containerID = Long.parseLong(keyName);
try {
String containerFileName = containerName.concat(CONTAINER_EXTENSION);
String metaFileName = containerName.concat(CONTAINER_META);
@@ -249,7 +251,7 @@ public class ContainerManagerImpl implements ContainerManager {
// when loading the info we get a null, this often means last time
// SCM was ending up at some middle phase causing that the metadata
// was not populated. Such containers are marked as inactive.
- containerMap.put(keyName, new ContainerStatus(null));
+ containerMap.put(containerID, new ContainerStatus(null));
return;
}
containerData = ContainerData.getFromProtBuf(containerDataProto, conf);
@@ -263,7 +265,7 @@ public class ContainerManagerImpl implements ContainerManager {
// Hopefully SCM will ask us to delete this container and rebuild it.
LOG.error("Invalid SHA found for container data. Name :{}"
+ "cowardly refusing to read invalid data", containerName);
- containerMap.put(keyName, new ContainerStatus(null));
+ containerMap.put(containerID, new ContainerStatus(null));
return;
}
@@ -295,7 +297,7 @@ public class ContainerManagerImpl implements ContainerManager {
}).sum();
containerStatus.setBytesUsed(bytesUsed);
- containerMap.put(keyName, containerStatus);
+ containerMap.put(containerID, containerStatus);
} catch (IOException | NoSuchAlgorithmException ex) {
LOG.error("read failed for file: {} ex: {}", containerName,
ex.getMessage());
@@ -303,7 +305,7 @@ public class ContainerManagerImpl implements ContainerManager {
// TODO : Add this file to a recovery Queue.
// Remember that this container is busted and we cannot use it.
- containerMap.put(keyName, new ContainerStatus(null));
+ containerMap.put(containerID, new ContainerStatus(null));
throw new StorageContainerException("Unable to read container info",
UNABLE_TO_READ_METADATA_DB);
} finally {
@@ -316,18 +318,17 @@ public class ContainerManagerImpl implements ContainerManager {
/**
* Creates a container with the given name.
*
- * @param pipeline -- Nodes which make up this container.
* @param containerData - Container Name and metadata.
* @throws StorageContainerException - Exception
*/
@Override
- public void createContainer(Pipeline pipeline, ContainerData containerData)
+ public void createContainer(ContainerData containerData)
throws StorageContainerException {
Preconditions.checkNotNull(containerData, "Container data cannot be null");
writeLock();
try {
- if (containerMap.containsKey(containerData.getName())) {
- LOG.debug("container already exists. {}", containerData.getName());
+ if (containerMap.containsKey(containerData.getContainerID())) {
+ LOG.debug("container already exists. {}", containerData.getContainerID());
throw new StorageContainerException("container already exists.",
CONTAINER_EXISTS);
}
@@ -399,7 +400,7 @@ public class ContainerManagerImpl implements ContainerManager {
location);
File metadataFile = ContainerUtils.getMetadataFile(containerData,
location);
- String containerName = containerData.getContainerName();
+ String containerName = Long.toString(containerData.getContainerID());
if(!overwrite) {
ContainerUtils.verifyIsNewContainer(containerFile, metadataFile);
@@ -446,7 +447,7 @@ public class ContainerManagerImpl implements ContainerManager {
LOG.error("Creation of container failed. Name: {}, we might need to " +
"cleanup partially created artifacts. ",
- containerData.getContainerName(), ex);
+ containerData.getContainerID(), ex);
throw new StorageContainerException("Container creation failed. ",
ex, CONTAINER_INTERNAL_ERROR);
} finally {
@@ -459,45 +460,45 @@ public class ContainerManagerImpl implements ContainerManager {
/**
* Deletes an existing container.
*
- * @param pipeline - nodes that make this container.
- * @param containerName - name of the container.
+ * @param containerID - ID of the container.
* @param forceDelete - whether this container should be deleted forcibly.
* @throws StorageContainerException
*/
@Override
- public void deleteContainer(Pipeline pipeline, String containerName,
+ public void deleteContainer(long containerID,
boolean forceDelete) throws StorageContainerException {
- Preconditions.checkNotNull(containerName, "Container name cannot be null");
- Preconditions.checkState(containerName.length() > 0,
- "Container name length cannot be zero.");
+ Preconditions.checkState(containerID >= 0,
+ "Container ID cannot be negative.");
writeLock();
try {
- if (isOpen(pipeline.getContainerName())) {
+ if (isOpen(containerID)) {
throw new StorageContainerException(
"Deleting an open container is not allowed.",
UNCLOSED_CONTAINER_IO);
}
- ContainerStatus status = containerMap.get(containerName);
+ ContainerStatus status = containerMap.get(containerID);
if (status == null) {
- LOG.debug("No such container. Name: {}", containerName);
- throw new StorageContainerException("No such container. Name : " +
- containerName, CONTAINER_NOT_FOUND);
+ LOG.debug("No such container. ID: {}", containerID);
+ throw new StorageContainerException("No such container. ID : " +
+ containerID, CONTAINER_NOT_FOUND);
}
if (status.getContainer() == null) {
- LOG.debug("Invalid container data. Name: {}", containerName);
+ LOG.debug("Invalid container data. ID: {}", containerID);
throw new StorageContainerException("Invalid container data. Name : " +
- containerName, CONTAINER_NOT_FOUND);
+ containerID, CONTAINER_NOT_FOUND);
}
ContainerUtils.removeContainer(status.getContainer(), conf, forceDelete);
- containerMap.remove(containerName);
+ containerMap.remove(containerID);
} catch (StorageContainerException e) {
throw e;
} catch (IOException e) {
// TODO : An I/O error during delete can leave partial artifacts on the
// disk. We will need the cleaner thread to cleanup this information.
- LOG.error("Failed to cleanup container. Name: {}", containerName, e);
- throw new StorageContainerException(containerName, e, IO_EXCEPTION);
+ String errMsg = String.format("Failed to cleanup container. ID: %d",
+ containerID);
+ LOG.error(errMsg, e);
+ throw new StorageContainerException(errMsg, e, IO_EXCEPTION);
} finally {
writeUnlock();
}
@@ -511,25 +512,29 @@ public class ContainerManagerImpl implements ContainerManager {
* time. It is possible that using this iteration you can miss certain
* container from the listing.
*
- * @param prefix - Return keys that match this prefix.
+ * @param startContainerID - Return containers with ID >= startContainerID.
* @param count - how many to return
- * @param prevKey - Previous Key Value or empty String.
* @param data - Actual containerData
* @throws StorageContainerException
*/
@Override
- public void listContainer(String prefix, long count, String prevKey,
+ public void listContainer(long startContainerID, long count,
List<ContainerData> data) throws StorageContainerException {
- // TODO : Support list with Prefix and PrevKey
Preconditions.checkNotNull(data,
"Internal assertion: data cannot be null");
+ Preconditions.checkState(startContainerID >= 0,
+ "Start container ID cannot be negative");
+ Preconditions.checkState(count > 0,
+ "max number of containers returned " +
+ "must be positive");
+
readLock();
try {
- ConcurrentNavigableMap<String, ContainerStatus> map;
- if (prevKey == null || prevKey.isEmpty()) {
+ ConcurrentNavigableMap<Long, ContainerStatus> map;
+ if (startContainerID == 0) {
map = containerMap.tailMap(containerMap.firstKey(), true);
} else {
- map = containerMap.tailMap(prevKey, false);
+ map = containerMap.tailMap(startContainerID, false);
}
int currentCount = 0;
@@ -549,24 +554,23 @@ public class ContainerManagerImpl implements ContainerManager {
/**
* Get metadata about a specific container.
*
- * @param containerName - Name of the container
+ * @param containerID - ID of the container
* @return ContainerData - Container Data.
* @throws StorageContainerException
*/
@Override
- public ContainerData readContainer(String containerName) throws
- StorageContainerException {
- Preconditions.checkNotNull(containerName, "Container name cannot be null");
- Preconditions.checkState(containerName.length() > 0,
- "Container name length cannot be zero.");
- if (!containerMap.containsKey(containerName)) {
- throw new StorageContainerException("Unable to find the container. Name: "
- + containerName, CONTAINER_NOT_FOUND);
+ public ContainerData readContainer(long containerID)
+ throws StorageContainerException {
+ Preconditions.checkState(containerID >= 0,
+ "Container ID cannot be negative.");
+ if (!containerMap.containsKey(containerID)) {
+ throw new StorageContainerException("Unable to find the container. ID: "
+ + containerID, CONTAINER_NOT_FOUND);
}
- ContainerData cData = containerMap.get(containerName).getContainer();
+ ContainerData cData = containerMap.get(containerID).getContainer();
if (cData == null) {
- throw new StorageContainerException("Invalid container data. Name: "
- + containerName, CONTAINER_INTERNAL_ERROR);
+ throw new StorageContainerException("Invalid container data. ID: "
+ + containerID, CONTAINER_INTERNAL_ERROR);
}
return cData;
}
@@ -575,13 +579,13 @@ public class ContainerManagerImpl implements ContainerManager {
* Closes a open container, if it is already closed or does not exist a
* StorageContainerException is thrown.
*
- * @param containerName - Name of the container.
+ * @param containerID - ID of the container.
* @throws StorageContainerException
*/
@Override
- public void closeContainer(String containerName)
+ public void closeContainer(long containerID)
throws StorageContainerException, NoSuchAlgorithmException {
- ContainerData containerData = readContainer(containerName);
+ ContainerData containerData = readContainer(containerID);
containerData.closeContainer();
writeContainerInfo(containerData, true);
MetadataStore db = KeyUtils.getDB(containerData, conf);
@@ -602,15 +606,13 @@ public class ContainerManagerImpl implements ContainerManager {
// issues.
ContainerStatus status = new ContainerStatus(containerData);
- containerMap.put(containerName, status);
+ containerMap.put(containerID, status);
}
@Override
- public void updateContainer(Pipeline pipeline, String containerName,
- ContainerData data, boolean forceUpdate)
- throws StorageContainerException {
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- Preconditions.checkNotNull(containerName, "Container name cannot be null");
+ public void updateContainer(long containerID, ContainerData data,
+ boolean forceUpdate) throws StorageContainerException {
+ Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
Preconditions.checkNotNull(data, "Container data cannot be null");
FileOutputStream containerStream = null;
DigestOutputStream dos = null;
@@ -618,9 +620,9 @@ public class ContainerManagerImpl implements ContainerManager {
File containerFileBK = null, containerFile = null;
boolean deleted = false;
- if(!containerMap.containsKey(containerName)) {
+ if(!containerMap.containsKey(containerID)) {
throw new StorageContainerException("Container doesn't exist. Name :"
- + containerName, CONTAINER_NOT_FOUND);
+ + containerID, CONTAINER_NOT_FOUND);
}
try {
@@ -633,7 +635,7 @@ public class ContainerManagerImpl implements ContainerManager {
try {
Path location = locationManager.getContainerPath();
- ContainerData orgData = containerMap.get(containerName).getContainer();
+ ContainerData orgData = containerMap.get(containerID).getContainer();
if (orgData == null) {
// updating a invalid container
throw new StorageContainerException("Update a container with invalid" +
@@ -642,7 +644,7 @@ public class ContainerManagerImpl implements ContainerManager {
if (!forceUpdate && !orgData.isOpen()) {
throw new StorageContainerException(
- "Update a closed container is not allowed. Name: " + containerName,
+ "Update a closed container is not allowed. ID: " + containerID,
UNSUPPORTED_REQUEST);
}
@@ -652,7 +654,7 @@ public class ContainerManagerImpl implements ContainerManager {
if (!forceUpdate) {
if (!containerFile.exists() || !containerFile.canWrite()) {
throw new StorageContainerException(
- "Container file not exists or corrupted. Name: " + containerName,
+ "Container file not exists or corrupted. ID: " + containerID,
CONTAINER_INTERNAL_ERROR);
}
@@ -672,7 +674,7 @@ public class ContainerManagerImpl implements ContainerManager {
// Update the in-memory map
ContainerStatus newStatus = new ContainerStatus(data);
- containerMap.replace(containerName, newStatus);
+ containerMap.replace(containerID, newStatus);
} catch (IOException e) {
// Restore the container file from backup
if(containerFileBK != null && containerFileBK.exists() && deleted) {
@@ -683,8 +685,8 @@ public class ContainerManagerImpl implements ContainerManager {
CONTAINER_INTERNAL_ERROR);
} else {
throw new StorageContainerException(
- "Failed to restore container data from the backup. Name: "
- + containerName, CONTAINER_INTERNAL_ERROR);
+ "Failed to restore container data from the backup. ID: "
+ + containerID, CONTAINER_INTERNAL_ERROR);
}
} else {
throw new StorageContainerException(
@@ -711,22 +713,22 @@ public class ContainerManagerImpl implements ContainerManager {
/**
* Checks if a container exists.
*
- * @param containerName - Name of the container.
+ * @param containerID - ID of the container.
* @return true if the container is open false otherwise.
* @throws StorageContainerException - Throws Exception if we are not able to
* find the container.
*/
@Override
- public boolean isOpen(String containerName) throws StorageContainerException {
- final ContainerStatus status = containerMap.get(containerName);
+ public boolean isOpen(long containerID) throws StorageContainerException {
+ final ContainerStatus status = containerMap.get(containerID);
if (status == null) {
throw new StorageContainerException(
- "Container status not found: " + containerName, CONTAINER_NOT_FOUND);
+ "Container status not found: " + containerID, CONTAINER_NOT_FOUND);
}
final ContainerData cData = status.getContainer();
if (cData == null) {
throw new StorageContainerException(
- "Container not found: " + containerName, CONTAINER_NOT_FOUND);
+ "Container not found: " + containerID, CONTAINER_NOT_FOUND);
}
return cData.isOpen();
}
@@ -746,7 +748,7 @@ public class ContainerManagerImpl implements ContainerManager {
@VisibleForTesting
- public ConcurrentSkipListMap<String, ContainerStatus> getContainerMap() {
+ public ConcurrentSkipListMap<Long, ContainerStatus> getContainerMap() {
return containerMap;
}
@@ -901,7 +903,7 @@ public class ContainerManagerImpl implements ContainerManager {
for (ContainerStatus container: containers) {
StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
- ciBuilder.setContainerName(container.getContainer().getContainerName())
+ ciBuilder.setContainerID(container.getContainer().getContainerID())
.setSize(container.getContainer().getMaxSize())
.setUsed(container.getContainer().getBytesUsed())
.setKeyCount(container.getContainer().getKeyCount())
@@ -966,7 +968,7 @@ public class ContainerManagerImpl implements ContainerManager {
}
@Override
- public void incrPendingDeletionBlocks(int numBlocks, String containerId) {
+ public void incrPendingDeletionBlocks(int numBlocks, long containerId) {
writeLock();
try {
ContainerStatus status = containerMap.get(containerId);
@@ -977,7 +979,7 @@ public class ContainerManagerImpl implements ContainerManager {
}
@Override
- public void decrPendingDeletionBlocks(int numBlocks, String containerId) {
+ public void decrPendingDeletionBlocks(int numBlocks, long containerId) {
writeLock();
try {
ContainerStatus status = containerMap.get(containerId);
@@ -990,35 +992,35 @@ public class ContainerManagerImpl implements ContainerManager {
/**
* Increase the read count of the container.
*
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
*/
@Override
- public void incrReadCount(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public void incrReadCount(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
status.incrReadCount();
}
- public long getReadCount(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public long getReadCount(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
return status.getReadCount();
}
/**
 * Increase the read counter for bytes read from the container.
*
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @param readBytes - bytes read from the container.
*/
@Override
- public void incrReadBytes(String containerName, long readBytes) {
- ContainerStatus status = containerMap.get(containerName);
+ public void incrReadBytes(long containerId, long readBytes) {
+ ContainerStatus status = containerMap.get(containerId);
status.incrReadBytes(readBytes);
}
- public long getReadBytes(String containerName) {
+ public long getReadBytes(long containerId) {
readLock();
try {
- ContainerStatus status = containerMap.get(containerName);
+ ContainerStatus status = containerMap.get(containerId);
return status.getReadBytes();
} finally {
readUnlock();
@@ -1028,76 +1030,76 @@ public class ContainerManagerImpl implements ContainerManager {
/**
* Increase the write count of the container.
*
- * @param containerName - Name of the container.
+   * @param containerId - ID of the container.
*/
@Override
- public void incrWriteCount(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public void incrWriteCount(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
status.incrWriteCount();
}
- public long getWriteCount(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public long getWriteCount(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
return status.getWriteCount();
}
/**
 * Increase the write counter for bytes written into the container.
*
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
 * @param writeBytes - bytes written into the container.
*/
@Override
- public void incrWriteBytes(String containerName, long writeBytes) {
- ContainerStatus status = containerMap.get(containerName);
+ public void incrWriteBytes(long containerId, long writeBytes) {
+ ContainerStatus status = containerMap.get(containerId);
status.incrWriteBytes(writeBytes);
}
- public long getWriteBytes(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public long getWriteBytes(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
return status.getWriteBytes();
}
/**
* Increase the bytes used by the container.
*
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @param used - additional bytes used by the container.
* @return the current bytes used.
*/
@Override
- public long incrBytesUsed(String containerName, long used) {
- ContainerStatus status = containerMap.get(containerName);
+ public long incrBytesUsed(long containerId, long used) {
+ ContainerStatus status = containerMap.get(containerId);
return status.incrBytesUsed(used);
}
/**
* Decrease the bytes used by the container.
*
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @param used - additional bytes reclaimed by the container.
* @return the current bytes used.
*/
@Override
- public long decrBytesUsed(String containerName, long used) {
- ContainerStatus status = containerMap.get(containerName);
+ public long decrBytesUsed(long containerId, long used) {
+ ContainerStatus status = containerMap.get(containerId);
return status.decrBytesUsed(used);
}
- public long getBytesUsed(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public long getBytesUsed(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
return status.getBytesUsed();
}
/**
* Get the number of keys in the container.
*
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @return the current key count.
*/
@Override
- public long getNumKeys(String containerName) {
- ContainerStatus status = containerMap.get(containerName);
+ public long getNumKeys(long containerId) {
+ ContainerStatus status = containerMap.get(containerId);
return status.getNumKeys(); }
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
index d319565012..46bd842953 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
.ContainerCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
@@ -186,7 +187,7 @@ public class Dispatcher implements ContainerDispatcher {
} catch (IOException ex) {
LOG.warn("Container operation failed. " +
"Container: {} Operation: {} trace ID: {} Error: {}",
- msg.getCreateContainer().getContainerData().getName(),
+ msg.getCreateContainer().getContainerData().getContainerID(),
msg.getCmdType().name(),
msg.getTraceID(),
ex.toString(), ex);
@@ -230,7 +231,7 @@ public class Dispatcher implements ContainerDispatcher {
} catch (IOException ex) {
LOG.warn("Container operation failed. " +
"Container: {} Operation: {} trace ID: {} Error: {}",
- msg.getCreateContainer().getContainerData().getName(),
+ msg.getCreateContainer().getContainerData().getContainerID(),
msg.getCmdType().name(),
msg.getTraceID(),
ex.toString(), ex);
@@ -273,7 +274,7 @@ public class Dispatcher implements ContainerDispatcher {
} catch (IOException ex) {
LOG.warn("Container operation failed. " +
"Container: {} Operation: {} trace ID: {} Error: {}",
- msg.getCreateContainer().getContainerData().getName(),
+ msg.getCreateContainer().getContainerData().getContainerID(),
msg.getCmdType().name(),
msg.getTraceID(),
ex.toString(), ex);
@@ -318,17 +319,14 @@ public class Dispatcher implements ContainerDispatcher {
msg.getTraceID());
return ContainerUtils.malformedRequest(msg);
}
-
- Pipeline pipeline = Pipeline.getFromProtoBuf(
- msg.getUpdateContainer().getPipeline());
- String containerName = msg.getUpdateContainer()
- .getContainerData().getName();
+ long containerID = msg.getUpdateContainer()
+ .getContainerData().getContainerID();
ContainerData data = ContainerData.getFromProtBuf(
msg.getUpdateContainer().getContainerData(), conf);
boolean forceUpdate = msg.getUpdateContainer().getForceUpdate();
- this.containerManager.updateContainer(
- pipeline, containerName, data, forceUpdate);
+ this.containerManager.updateContainer(containerID,
+ data, forceUpdate);
return ContainerUtils.getContainerResponse(msg);
}
@@ -349,8 +347,9 @@ public class Dispatcher implements ContainerDispatcher {
return ContainerUtils.malformedRequest(msg);
}
- String name = msg.getReadContainer().getName();
- ContainerData container = this.containerManager.readContainer(name);
+ long containerID = msg.getReadContainer().getContainerID();
+ ContainerData container = this.containerManager.
+ readContainer(containerID);
return ContainerUtils.getReadContainerResponse(msg, container);
}
@@ -370,12 +369,9 @@ public class Dispatcher implements ContainerDispatcher {
return ContainerUtils.malformedRequest(msg);
}
- Pipeline pipeline = Pipeline.getFromProtoBuf(
- msg.getDeleteContainer().getPipeline());
- Preconditions.checkNotNull(pipeline);
- String name = msg.getDeleteContainer().getName();
+ long containerID = msg.getDeleteContainer().getContainerID();
boolean forceDelete = msg.getDeleteContainer().getForceDelete();
- this.containerManager.deleteContainer(pipeline, name, forceDelete);
+ this.containerManager.deleteContainer(containerID, forceDelete);
return ContainerUtils.getContainerResponse(msg);
}
@@ -401,7 +397,7 @@ public class Dispatcher implements ContainerDispatcher {
msg.getCreateContainer().getPipeline());
Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- this.containerManager.createContainer(pipeline, cData);
+ this.containerManager.createContainer(cData);
return ContainerUtils.getContainerResponse(msg);
}
@@ -420,14 +416,12 @@ public class Dispatcher implements ContainerDispatcher {
msg.getTraceID());
return ContainerUtils.malformedRequest(msg);
}
- Pipeline pipeline = Pipeline.getFromProtoBuf(msg.getCloseContainer()
- .getPipeline());
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- if (!this.containerManager.isOpen(pipeline.getContainerName())) {
+ long containerID = msg.getCloseContainer().getContainerID();
+ if (!this.containerManager.isOpen(containerID)) {
throw new StorageContainerException("Attempting to close a closed " +
"container.", CLOSED_CONTAINER_IO);
}
- this.containerManager.closeContainer(pipeline.getContainerName());
+ this.containerManager.closeContainer(containerID);
return ContainerUtils.getContainerResponse(msg);
} catch (NoSuchAlgorithmException e) {
throw new StorageContainerException("No such Algorithm", e,
@@ -449,11 +443,9 @@ public class Dispatcher implements ContainerDispatcher {
msg.getTraceID());
return ContainerUtils.malformedRequest(msg);
}
- String keyName = msg.getWriteChunk().getKeyName();
- Pipeline pipeline = Pipeline.getFromProtoBuf(
- msg.getWriteChunk().getPipeline());
- Preconditions.checkNotNull(pipeline);
- if (!this.containerManager.isOpen(pipeline.getContainerName())) {
+ BlockID blockID = BlockID.getFromProtobuf(
+ msg.getWriteChunk().getBlockID());
+ if (!this.containerManager.isOpen(blockID.getContainerID())) {
throw new StorageContainerException("Write to closed container.",
CLOSED_CONTAINER_IO);
}
@@ -469,7 +461,7 @@ public class Dispatcher implements ContainerDispatcher {
}
this.containerManager.getChunkManager()
- .writeChunk(pipeline, keyName, chunkInfo,
+ .writeChunk(blockID, chunkInfo,
data, msg.getWriteChunk().getStage());
return ChunkUtils.getChunkResponse(msg);
@@ -489,17 +481,13 @@ public class Dispatcher implements ContainerDispatcher {
msg.getTraceID());
return ContainerUtils.malformedRequest(msg);
}
-
- String keyName = msg.getReadChunk().getKeyName();
- Pipeline pipeline = Pipeline.getFromProtoBuf(
- msg.getReadChunk().getPipeline());
- Preconditions.checkNotNull(pipeline);
-
+ BlockID blockID = BlockID.getFromProtobuf(
+ msg.getReadChunk().getBlockID());
ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(msg.getReadChunk()
.getChunkData());
Preconditions.checkNotNull(chunkInfo);
- byte[] data = this.containerManager.getChunkManager().readChunk(pipeline,
- keyName, chunkInfo);
+ byte[] data = this.containerManager.getChunkManager().
+ readChunk(blockID, chunkInfo);
metrics.incContainerBytesStats(Type.ReadChunk, data.length);
return ChunkUtils.getReadChunkResponse(msg, data, chunkInfo);
}
@@ -519,11 +507,10 @@ public class Dispatcher implements ContainerDispatcher {
return ContainerUtils.malformedRequest(msg);
}
- String keyName = msg.getDeleteChunk().getKeyName();
- Pipeline pipeline = Pipeline.getFromProtoBuf(
- msg.getDeleteChunk().getPipeline());
- Preconditions.checkNotNull(pipeline);
- if (!this.containerManager.isOpen(pipeline.getContainerName())) {
+ BlockID blockID = BlockID.getFromProtobuf(msg.getDeleteChunk()
+ .getBlockID());
+ long containerID = blockID.getContainerID();
+ if (!this.containerManager.isOpen(containerID)) {
throw new StorageContainerException("Write to closed container.",
CLOSED_CONTAINER_IO);
}
@@ -531,7 +518,7 @@ public class Dispatcher implements ContainerDispatcher {
.getChunkData());
Preconditions.checkNotNull(chunkInfo);
- this.containerManager.getChunkManager().deleteChunk(pipeline, keyName,
+ this.containerManager.getChunkManager().deleteChunk(blockID,
chunkInfo);
return ChunkUtils.getChunkResponse(msg);
}
@@ -550,15 +537,16 @@ public class Dispatcher implements ContainerDispatcher {
msg.getTraceID());
return ContainerUtils.malformedRequest(msg);
}
- Pipeline pipeline = Pipeline.getFromProtoBuf(msg.getPutKey().getPipeline());
- Preconditions.checkNotNull(pipeline);
- if (!this.containerManager.isOpen(pipeline.getContainerName())) {
+ BlockID blockID = BlockID.getFromProtobuf(
+ msg.getPutKey().getKeyData().getBlockID());
+ long containerID = blockID.getContainerID();
+ if (!this.containerManager.isOpen(containerID)) {
throw new StorageContainerException("Write to closed container.",
CLOSED_CONTAINER_IO);
}
KeyData keyData = KeyData.getFromProtoBuf(msg.getPutKey().getKeyData());
Preconditions.checkNotNull(keyData);
- this.containerManager.getKeyManager().putKey(pipeline, keyData);
+ this.containerManager.getKeyManager().putKey(keyData);
long numBytes = keyData.getProtoBufMessage().toByteArray().length;
metrics.incContainerBytesStats(Type.PutKey, numBytes);
return KeyUtils.getKeyResponse(msg);
@@ -601,17 +589,15 @@ public class Dispatcher implements ContainerDispatcher {
msg.getTraceID());
return ContainerUtils.malformedRequest(msg);
}
- Pipeline pipeline =
- Pipeline.getFromProtoBuf(msg.getDeleteKey().getPipeline());
- Preconditions.checkNotNull(pipeline);
- if (!this.containerManager.isOpen(pipeline.getContainerName())) {
+ BlockID blockID = BlockID.getFromProtobuf(msg.getDeleteKey()
+ .getBlockID());
+ Preconditions.checkNotNull(blockID);
+ long containerID = blockID.getContainerID();
+ if (!this.containerManager.isOpen(containerID)) {
throw new StorageContainerException("Write to closed container.",
CLOSED_CONTAINER_IO);
}
- String keyName = msg.getDeleteKey().getName();
- Preconditions.checkNotNull(keyName);
- Preconditions.checkState(!keyName.isEmpty());
- this.containerManager.getKeyManager().deleteKey(pipeline, keyName);
+ this.containerManager.getKeyManager().deleteKey(blockID);
return KeyUtils.getKeyResponse(msg);
}
@@ -632,12 +618,11 @@ public class Dispatcher implements ContainerDispatcher {
}
try {
- Pipeline pipeline =
- Pipeline.getFromProtoBuf(msg.getPutSmallFile()
- .getKey().getPipeline());
+ BlockID blockID = BlockID.getFromProtobuf(msg.
+ getPutSmallFile().getKey().getKeyData().getBlockID());
+ long containerID = blockID.getContainerID();
- Preconditions.checkNotNull(pipeline);
- if (!this.containerManager.isOpen(pipeline.getContainerName())) {
+ if (!this.containerManager.isOpen(containerID)) {
throw new StorageContainerException("Write to closed container.",
CLOSED_CONTAINER_IO);
}
@@ -648,12 +633,12 @@ public class Dispatcher implements ContainerDispatcher {
byte[] data = msg.getPutSmallFile().getData().toByteArray();
metrics.incContainerBytesStats(Type.PutSmallFile, data.length);
- this.containerManager.getChunkManager().writeChunk(pipeline, keyData
- .getKeyName(), chunkInfo, data, ContainerProtos.Stage.COMBINED);
+ this.containerManager.getChunkManager().writeChunk(blockID,
+ chunkInfo, data, ContainerProtos.Stage.COMBINED);
List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
chunks.add(chunkInfo.getProtoBufMessage());
keyData.setChunks(chunks);
- this.containerManager.getKeyManager().putKey(pipeline, keyData);
+ this.containerManager.getKeyManager().putKey(keyData);
return FileUtils.getPutFileResponse(msg);
} catch (StorageContainerException e) {
return ContainerUtils.logAndReturnError(LOG, e, msg);
@@ -680,12 +665,7 @@ public class Dispatcher implements ContainerDispatcher {
return ContainerUtils.malformedRequest(msg);
}
try {
- Pipeline pipeline =
- Pipeline.getFromProtoBuf(msg.getGetSmallFile()
- .getKey().getPipeline());
-
long bytes = 0;
- Preconditions.checkNotNull(pipeline);
KeyData keyData = KeyData.getFromProtoBuf(msg.getGetSmallFile()
.getKey().getKeyData());
KeyData data = this.containerManager.getKeyManager().getKey(keyData);
@@ -694,9 +674,8 @@ public class Dispatcher implements ContainerDispatcher {
bytes += chunk.getSerializedSize();
ByteString current =
ByteString.copyFrom(this.containerManager.getChunkManager()
- .readChunk(
- pipeline, keyData.getKeyName(), ChunkInfo.getFromProtoBuf(
- chunk)));
+ .readChunk(keyData.getBlockID(),
+ ChunkInfo.getFromProtoBuf(chunk)));
dataBuf = dataBuf.concat(current);
c = chunk;
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
index cf6bf12214..f920ded293 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
@@ -19,20 +19,18 @@
package org.apache.hadoop.ozone.container.common.impl;
import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
-import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter;
import org.apache.hadoop.utils.MetadataStore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -73,22 +71,21 @@ public class KeyManagerImpl implements KeyManager {
* {@inheritDoc}
*/
@Override
- public void putKey(Pipeline pipeline, KeyData data) throws IOException {
+ public void putKey(KeyData data) throws IOException {
+ Preconditions.checkNotNull(data, "KeyData cannot be null for put operation.");
+ Preconditions.checkState(data.getContainerID() >= 0, "Container ID cannot be negative");
containerManager.readLock();
try {
// We are not locking the key manager since LevelDb serializes all actions
// against a single DB. We rely on DB level locking to avoid conflicts.
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- String containerName = pipeline.getContainerName();
- Preconditions.checkNotNull(containerName,
- "Container name cannot be null");
- ContainerData cData = containerManager.readContainer(containerName);
+ ContainerData cData = containerManager.readContainer(
+ data.getContainerID());
MetadataStore db = KeyUtils.getDB(cData, conf);
// This is a post condition that acts as a hint to the user.
// Should never fail.
Preconditions.checkNotNull(db, "DB cannot be null here");
- db.put(data.getKeyName().getBytes(KeyUtils.ENCODING), data
+ db.put(Longs.toByteArray(data.getLocalID()), data
.getProtoBufMessage().toByteArray());
} finally {
containerManager.readUnlock();
@@ -103,17 +100,17 @@ public class KeyManagerImpl implements KeyManager {
containerManager.readLock();
try {
Preconditions.checkNotNull(data, "Key data cannot be null");
- Preconditions.checkNotNull(data.getContainerName(),
+ Preconditions.checkNotNull(data.getContainerID(),
"Container name cannot be null");
ContainerData cData = containerManager.readContainer(data
- .getContainerName());
+ .getContainerID());
MetadataStore db = KeyUtils.getDB(cData, conf);
// This is a post condition that acts as a hint to the user.
// Should never fail.
Preconditions.checkNotNull(db, "DB cannot be null here");
- byte[] kData = db.get(data.getKeyName().getBytes(KeyUtils.ENCODING));
+ byte[] kData = db.get(Longs.toByteArray(data.getLocalID()));
if (kData == null) {
throw new StorageContainerException("Unable to find the key.",
NO_SUCH_KEY);
@@ -130,15 +127,19 @@ public class KeyManagerImpl implements KeyManager {
* {@inheritDoc}
*/
@Override
- public void deleteKey(Pipeline pipeline, String keyName)
+ public void deleteKey(BlockID blockID)
throws IOException {
+ Preconditions.checkNotNull(blockID, "block ID cannot be null.");
+ Preconditions.checkState(blockID.getContainerID() >= 0,
+ "Container ID cannot be negative.");
+ Preconditions.checkState(blockID.getLocalID() >= 0,
+ "Local ID cannot be negative.");
+
containerManager.readLock();
try {
- Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
- String containerName = pipeline.getContainerName();
- Preconditions.checkNotNull(containerName,
- "Container name cannot be null");
- ContainerData cData = containerManager.readContainer(containerName);
+
+ ContainerData cData = containerManager
+ .readContainer(blockID.getContainerID());
MetadataStore db = KeyUtils.getDB(cData, conf);
// This is a post condition that acts as a hint to the user.
@@ -149,12 +150,13 @@ public class KeyManagerImpl implements KeyManager {
// to delete a key which might have just gotten inserted after
// the get check.
- byte[] kData = db.get(keyName.getBytes(KeyUtils.ENCODING));
+ byte[] kKey = Longs.toByteArray(blockID.getLocalID());
+ byte[] kData = db.get(kKey);
if (kData == null) {
throw new StorageContainerException("Unable to find the key.",
NO_SUCH_KEY);
}
- db.delete(keyName.getBytes(KeyUtils.ENCODING));
+ db.delete(kKey);
} finally {
containerManager.readUnlock();
}
@@ -165,26 +167,22 @@ public class KeyManagerImpl implements KeyManager {
*/
@Override
public List<KeyData> listKey(
- Pipeline pipeline, String prefix, String startKey, int count)
+ long containerID, long startLocalID, int count)
throws IOException {
- Preconditions.checkNotNull(pipeline,
- "Pipeline cannot be null.");
+ Preconditions.checkState(containerID >= 0, "Container ID cannot be negative");
+ Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be negative");
Preconditions.checkArgument(count > 0,
"Count must be a positive number.");
- ContainerData cData = containerManager.readContainer(pipeline
- .getContainerName());
+ ContainerData cData = containerManager.readContainer(containerID);
MetadataStore db = KeyUtils.getDB(cData, conf);
- List<KeyData> result = new ArrayList<KeyData>();
- byte[] startKeyInBytes = startKey == null ? null :
- DFSUtil.string2Bytes(startKey);
- MetadataKeyFilter prefixFilter = new KeyPrefixFilter(prefix);
+ List<KeyData> result = new ArrayList<>();
+ byte[] startKeyInBytes = Longs.toByteArray(startLocalID);
List<Map.Entry<byte[], byte[]>> range =
- db.getSequentialRangeKVs(startKeyInBytes, count, prefixFilter);
+ db.getSequentialRangeKVs(startKeyInBytes, count, null);
for (Map.Entry<byte[], byte[]> entry : range) {
- String keyName = KeyUtils.getKeyName(entry.getKey());
KeyData value = KeyUtils.getKeyData(entry.getValue());
- KeyData data = new KeyData(value.getContainerName(), keyName);
+ KeyData data = new KeyData(value.getBlockID());
result.add(data);
}
return result;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
index 3e267d2b37..06177cb332 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
@@ -41,7 +41,7 @@ public class RandomContainerDeletionChoosingPolicy
@Override
public List<ContainerData> chooseContainerForBlockDeletion(int count,
- Map<String, ContainerStatus> candidateContainers)
+ Map<Long, ContainerStatus> candidateContainers)
throws StorageContainerException {
Preconditions.checkNotNull(candidateContainers,
"Internal assertion: candidate containers cannot be null");
@@ -58,7 +58,7 @@ public class RandomContainerDeletionChoosingPolicy
LOG.debug("Select container {} for block deletion, "
+ "pending deletion blocks num: {}.",
- entry.getContainer().getContainerName(),
+ entry.getContainer().getContainerID(),
entry.getNumPendingDeletionBlocks());
} else {
break;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
index 0169a96cf9..246342672f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java
@@ -53,7 +53,7 @@ public class TopNOrderedContainerDeletionChoosingPolicy
@Override
public List<ContainerData> chooseContainerForBlockDeletion(int count,
- Map<String, ContainerStatus> candidateContainers)
+ Map<Long, ContainerStatus> candidateContainers)
throws StorageContainerException {
Preconditions.checkNotNull(candidateContainers,
"Internal assertion: candidate containers cannot be null");
@@ -74,7 +74,7 @@ public class TopNOrderedContainerDeletionChoosingPolicy
LOG.debug(
"Select container {} for block deletion, "
+ "pending deletion blocks num: {}.",
- entry.getContainer().getContainerName(),
+ entry.getContainer().getContainerID(),
entry.getNumPendingDeletionBlocks());
} else {
LOG.debug("Stop looking for next container, there is no"
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
index f55d74ca2b..26dcf217cd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java
@@ -18,10 +18,10 @@
package org.apache.hadoop.ozone.container.common.interfaces;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
/**
@@ -32,20 +32,18 @@ public interface ChunkManager {
/**
* writes a given chunk.
- * @param pipeline - Name and the set of machines that make this container.
- * @param keyName - Name of the Key.
+ * @param blockID - ID of the block.
* @param info - ChunkInfo.
* @param stage - Chunk Stage write.
* @throws StorageContainerException
*/
- void writeChunk(Pipeline pipeline, String keyName,
- ChunkInfo info, byte[] data, ContainerProtos.Stage stage)
+ void writeChunk(BlockID blockID,
+ ChunkInfo info, byte[] data, ContainerProtos.Stage stage)
throws StorageContainerException;
/**
* reads the data defined by a chunk.
- * @param pipeline - container pipeline.
- * @param keyName - Name of the Key
+ * @param blockID - ID of the block.
* @param info - ChunkInfo.
* @return byte array
* @throws StorageContainerException
@@ -53,17 +51,16 @@ public interface ChunkManager {
* TODO: Right now we do not support partial reads and writes of chunks.
* TODO: Explore if we need to do that for ozone.
*/
- byte[] readChunk(Pipeline pipeline, String keyName, ChunkInfo info) throws
+ byte[] readChunk(BlockID blockID, ChunkInfo info) throws
StorageContainerException;
/**
* Deletes a given chunk.
- * @param pipeline - Pipeline.
- * @param keyName - Key Name
+ * @param blockID - ID of the block.
* @param info - Chunk Info
* @throws StorageContainerException
*/
- void deleteChunk(Pipeline pipeline, String keyName, ChunkInfo info) throws
+ void deleteChunk(BlockID blockID, ChunkInfo info) throws
StorageContainerException;
// TODO : Support list operations.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
index f7280e2a3c..6b60c52719 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java
@@ -41,6 +41,6 @@ public interface ContainerDeletionChoosingPolicy {
* @throws StorageContainerException
*/
List<ContainerData> chooseContainerForBlockDeletion(int count,
- Map<String, ContainerStatus> candidateContainers)
+ Map<Long, ContainerStatus> candidateContainers)
throws StorageContainerException;
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
index 2ff636e87f..84d95f8612 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.container.common.interfaces;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -60,48 +59,43 @@ public interface ContainerManager extends RwLock {
/**
   * Creates a container described by the given container metadata.
*
- * @param pipeline -- Nodes which make up this container.
* @param containerData - Container Name and metadata.
* @throws StorageContainerException
*/
- void createContainer(Pipeline pipeline, ContainerData containerData)
+ void createContainer(ContainerData containerData)
throws StorageContainerException;
/**
* Deletes an existing container.
*
- * @param pipeline - nodes that make this container.
- * @param containerName - name of the container.
+ * @param containerID - ID of the container.
* @param forceDelete - whether this container should be deleted forcibly.
* @throws StorageContainerException
*/
- void deleteContainer(Pipeline pipeline, String containerName,
+ void deleteContainer(long containerID,
boolean forceDelete) throws StorageContainerException;
/**
* Update an existing container.
*
- * @param pipeline container nodes
- * @param containerName name of the container
+ * @param containerID ID of the container
* @param data container data
* @param forceUpdate if true, update container forcibly.
* @throws StorageContainerException
*/
- void updateContainer(Pipeline pipeline, String containerName,
- ContainerData data, boolean forceUpdate) throws StorageContainerException;
+ void updateContainer(long containerID, ContainerData data,
+ boolean forceUpdate) throws StorageContainerException;
/**
* As simple interface for container Iterations.
*
- * @param prefix - Return only values matching this prefix
- * @param count - how many to return
- * @param prevKey - Previous key - Server returns results from this point.
- * @param data - Actual containerData
+ * @param startContainerID - Return containers with ID >= startContainerID.
+ * @param count - how many to return
+ * @param data - Actual containerData
* @throws StorageContainerException
*/
- void listContainer(String prefix, long count, String prevKey,
- List<ContainerData> data)
- throws StorageContainerException;
+ void listContainer(long startContainerID, long count,
+ List<ContainerData> data) throws StorageContainerException;
/**
* Choose containers for block deletion.
@@ -115,30 +109,30 @@ public interface ContainerManager extends RwLock {
/**
* Get metadata about a specific container.
*
- * @param containerName - Name of the container
+ * @param containerID - ID of the container.
* @return ContainerData - Container Data.
* @throws StorageContainerException
*/
- ContainerData readContainer(String containerName)
+ ContainerData readContainer(long containerID)
throws StorageContainerException;
/**
* Closes a open container, if it is already closed or does not exist a
* StorageContainerException is thrown.
- * @param containerName - Name of the container.
+ * @param containerID - ID of the container.
* @throws StorageContainerException
*/
- void closeContainer(String containerName)
+ void closeContainer(long containerID)
throws StorageContainerException, NoSuchAlgorithmException;
/**
* Checks if a container exists.
- * @param containerName - Name of the container.
+ * @param containerID - ID of the container.
   * @return true if the container is open, false otherwise.
* @throws StorageContainerException - Throws Exception if we are not
* able to find the container.
*/
- boolean isOpen(String containerName) throws StorageContainerException;
+ boolean isOpen(long containerID) throws StorageContainerException;
/**
* Supports clean shutdown of container.
@@ -203,7 +197,7 @@ public interface ContainerManager extends RwLock {
* @param containerId
* container id
*/
- void incrPendingDeletionBlocks(int numBlocks, String containerId);
+ void incrPendingDeletionBlocks(int numBlocks, long containerId);
/**
* Decrease pending deletion blocks count number of specified container.
@@ -213,64 +207,64 @@ public interface ContainerManager extends RwLock {
* @param containerId
* container id
*/
- void decrPendingDeletionBlocks(int numBlocks, String containerId);
+ void decrPendingDeletionBlocks(int numBlocks, long containerId);
/**
* Increase the read count of the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
*/
- void incrReadCount(String containerName);
+ void incrReadCount(long containerId);
/**
   * Increase the read counter for bytes read from the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @param readBytes - bytes read from the container.
*/
- void incrReadBytes(String containerName, long readBytes);
+ void incrReadBytes(long containerId, long readBytes);
/**
* Increase the write count of the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
*/
- void incrWriteCount(String containerName);
+ void incrWriteCount(long containerId);
/**
   * Increase the write counter for bytes written into the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
   * @param writeBytes - bytes written into the container.
*/
- void incrWriteBytes(String containerName, long writeBytes);
+ void incrWriteBytes(long containerId, long writeBytes);
/**
* Increase the bytes used by the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @param used - additional bytes used by the container.
* @return the current bytes used.
*/
- long incrBytesUsed(String containerName, long used);
+ long incrBytesUsed(long containerId, long used);
/**
* Decrease the bytes used by the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @param used - additional bytes reclaimed by the container.
* @return the current bytes used.
*/
- long decrBytesUsed(String containerName, long used);
+ long decrBytesUsed(long containerId, long used);
/**
* Get the bytes used by the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @return the current bytes used by the container.
*/
- long getBytesUsed(String containerName);
+ long getBytesUsed(long containerId);
/**
* Get the number of keys in the container.
- * @param containerName - Name of the container.
+ * @param containerId - ID of the container.
* @return the current key count.
*/
- long getNumKeys(String containerName);
+ long getNumKeys(long containerId);
/**
* Get the container report state to send via HB to SCM.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
index 8c27ba94c4..158ce38efc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java
@@ -17,9 +17,9 @@
*/
package org.apache.hadoop.ozone.container.common.interfaces;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
+import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.ozone.container.common.helpers.KeyData;
import java.io.IOException;
@@ -32,11 +32,10 @@ public interface KeyManager {
/**
* Puts or overwrites a key.
*
- * @param pipeline - Pipeline.
* @param data - Key Data.
* @throws IOException
*/
- void putKey(Pipeline pipeline, KeyData data) throws IOException;
+ void putKey(KeyData data) throws IOException;
/**
* Gets an existing key.
@@ -50,23 +49,21 @@ public interface KeyManager {
/**
* Deletes an existing Key.
*
- * @param pipeline - Pipeline.
- * @param keyName Key Data.
+ * @param blockID - ID of the block.
* @throws StorageContainerException
*/
- void deleteKey(Pipeline pipeline, String keyName)
+ void deleteKey(BlockID blockID)
throws IOException;
/**
* List keys in a container.
*
- * @param pipeline - pipeline.
- * @param prefix - Prefix in needed.
- * @param startKey - Key to start from, EMPTY_STRING to begin.
+ * @param containerID - ID of the container.
+ * @param startLocalID - Key to start from, 0 to begin.
* @param count - Number of keys to return.
* @return List of Keys that match the criteria.
*/
- List<KeyData> listKey(Pipeline pipeline, String prefix, String startKey,
+ List<KeyData> listKey(long containerID, long startLocalID,
int count) throws IOException;
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
index ac95b2a12c..7c3fa30bd1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
@@ -180,12 +180,12 @@ public class BlockDeletingService extends BackgroundService{
meta.getSequentialRangeKVs(null, blockLimitPerTask, filter);
if (toDeleteBlocks.isEmpty()) {
LOG.debug("No under deletion block found in container : {}",
- containerData.getContainerName());
+ containerData.getContainerID());
}
List<String> succeedBlocks = new LinkedList<>();
LOG.debug("Container : {}, To-Delete blocks : {}",
- containerData.getContainerName(), toDeleteBlocks.size());
+ containerData.getContainerID(), toDeleteBlocks.size());
File dataDir = ContainerUtils.getDataDirectory(containerData).toFile();
if (!dataDir.exists() || !dataDir.isDirectory()) {
LOG.error("Invalid container data dir {} : "
@@ -220,11 +220,11 @@ public class BlockDeletingService extends BackgroundService{
meta.writeBatch(batch);
// update count of pending deletion blocks in in-memory container status
containerManager.decrPendingDeletionBlocks(succeedBlocks.size(),
- containerData.getContainerName());
+ containerData.getContainerID());
if (!succeedBlocks.isEmpty()) {
LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms",
- containerData.getContainerName(), succeedBlocks.size(),
+ containerData.getContainerID(), succeedBlocks.size(),
Time.monotonicNow() - startTime);
}
crr.addAll(succeedBlocks);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
index f7b49b7590..d8adc7df0f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
@@ -58,19 +58,20 @@ public class CloseContainerHandler implements CommandHandler {
LOG.debug("Processing Close Container command.");
invocationCount++;
long startTime = Time.monotonicNow();
- String containerName = "UNKNOWN";
+ // TODO: define this as INVALID_CONTAINER_ID in HddsConsts.java (TBA)
+ long containerID = -1;
try {
SCMCloseContainerCmdResponseProto
closeContainerProto =
SCMCloseContainerCmdResponseProto
.parseFrom(command.getProtoBufMessage());
- containerName = closeContainerProto.getContainerName();
+ containerID = closeContainerProto.getContainerID();
- container.getContainerManager().closeContainer(containerName);
+ container.getContainerManager().closeContainer(containerID);
} catch (Exception e) {
- LOG.error("Can't close container " + containerName, e);
+ LOG.error("Can't close container " + containerID, e);
} finally {
long endTime = Time.monotonicNow();
totalTime += endTime - startTime;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index f106e3d55f..5231660b6d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -16,6 +16,7 @@
*/
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
+import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.protocol.proto
@@ -108,7 +109,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
txResultBuilder.setSuccess(true);
} catch (IOException e) {
LOG.warn("Failed to delete blocks for container={}, TXID={}",
- entry.getContainerName(), entry.getTxID(), e);
+ entry.getContainerID(), entry.getTxID(), e);
txResultBuilder.setSuccess(false);
}
resultBuilder.addResults(txResultBuilder.build());
@@ -150,7 +151,7 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
*/
private void deleteContainerBlocks(DeletedBlocksTransaction delTX,
Configuration config) throws IOException {
- String containerId = delTX.getContainerName();
+ long containerId = delTX.getContainerID();
ContainerData containerInfo = containerManager.readContainer(containerId);
if (LOG.isDebugEnabled()) {
LOG.debug("Processing Container : {}, DB path : {}", containerId,
@@ -159,9 +160,9 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
int newDeletionBlocks = 0;
MetadataStore containerDB = KeyUtils.getDB(containerInfo, config);
- for (String blk : delTX.getBlockIDList()) {
+ for (Long blk : delTX.getLocalIDList()) {
BatchOperation batch = new BatchOperation();
- byte[] blkBytes = DFSUtil.string2Bytes(blk);
+ byte[] blkBytes = Longs.toByteArray(blk);
byte[] blkInfo = containerDB.get(blkBytes);
if (blkInfo != null) {
// Found the block in container db,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 5dee10f44b..eba565db64 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -170,7 +170,7 @@ public class HeartbeatEndpointTask
commandResponseProto.getCloseContainerProto());
if (LOG.isDebugEnabled()) {
LOG.debug("Received SCM container close request for container {}",
- closeContainer.getContainerName());
+ closeContainer.getContainerID());
}
this.context.addCommand(closeContainer);
break;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 1a89e44bd1..89eaaced03 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -94,7 +94,7 @@ public class ContainerStateMachine extends BaseStateMachine {
private ThreadPoolExecutor writeChunkExecutor;
private final ConcurrentHashMap<Long, CompletableFuture<Message>>
writeChunkFutureMap;
- private final ConcurrentHashMap<String, CompletableFuture<Message>>
+ private final ConcurrentHashMap<Long, CompletableFuture<Message>>
createContainerFutureMap;
ContainerStateMachine(ContainerDispatcher dispatcher,
@@ -146,8 +146,7 @@ public class ContainerStateMachine extends BaseStateMachine {
// create the log entry proto
final WriteChunkRequestProto commitWriteChunkProto =
WriteChunkRequestProto.newBuilder()
- .setPipeline(write.getPipeline())
- .setKeyName(write.getKeyName())
+ .setBlockID(write.getBlockID())
.setChunkData(write.getChunkData())
// skipping the data field as it is
// already set in statemachine data proto
@@ -196,9 +195,9 @@ public class ContainerStateMachine extends BaseStateMachine {
private CompletableFuture<Message> handleWriteChunk(
ContainerCommandRequestProto requestProto, long entryIndex) {
final WriteChunkRequestProto write = requestProto.getWriteChunk();
- String containerName = write.getPipeline().getContainerName();
+ long containerID = write.getBlockID().getContainerID();
CompletableFuture<Message> future =
- createContainerFutureMap.get(containerName);
+ createContainerFutureMap.get(containerID);
CompletableFuture<Message> writeChunkFuture;
if (future != null) {
writeChunkFuture = future.thenApplyAsync(
@@ -213,10 +212,10 @@ public class ContainerStateMachine extends BaseStateMachine {
private CompletableFuture<Message> handleCreateContainer(
ContainerCommandRequestProto requestProto) {
- String containerName =
- requestProto.getCreateContainer().getContainerData().getName();
+ long containerID =
+ requestProto.getCreateContainer().getContainerData().getContainerID();
createContainerFutureMap.
- computeIfAbsent(containerName, k -> new CompletableFuture<>());
+ computeIfAbsent(containerID, k -> new CompletableFuture<>());
return CompletableFuture.completedFuture(() -> ByteString.EMPTY);
}
@@ -270,9 +269,9 @@ public class ContainerStateMachine extends BaseStateMachine {
} else {
Message message = runCommand(requestProto);
if (cmdType == ContainerProtos.Type.CreateContainer) {
- String containerName =
- requestProto.getCreateContainer().getContainerData().getName();
- createContainerFutureMap.remove(containerName).complete(message);
+ long containerID =
+ requestProto.getCreateContainer().getContainerData().getContainerID();
+ createContainerFutureMap.remove(containerID).complete(message);
}
return CompletableFuture.completedFuture(message);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index 6ae45b6d08..4d9c690311 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -69,15 +69,15 @@ public final class ContainerCache extends LRUMap {
/**
* Closes a db instance.
*
- * @param container - name of the container to be closed.
+ * @param containerID - ID of the container to be closed.
* @param db - db instance to close.
*/
- private void closeDB(String container, MetadataStore db) {
+ private void closeDB(long containerID, MetadataStore db) {
if (db != null) {
try {
db.close();
} catch (IOException e) {
- LOG.error("Error closing DB. Container: " + container, e);
+ LOG.error("Error closing DB. Container: " + containerID, e);
}
}
}
@@ -93,7 +93,7 @@ public final class ContainerCache extends LRUMap {
while (iterator.hasNext()) {
iterator.next();
MetadataStore db = (MetadataStore) iterator.getValue();
- closeDB(iterator.getKey().toString(), db);
+ closeDB(((Number)iterator.getKey()).longValue(), db);
}
// reset the cache
cache.clear();
@@ -110,7 +110,7 @@ public final class ContainerCache extends LRUMap {
lock.lock();
try {
MetadataStore db = (MetadataStore) entry.getValue();
- closeDB(entry.getKey().toString(), db);
+ closeDB(((Number)entry.getKey()).longValue(), db);
} finally {
lock.unlock();
}
@@ -120,28 +120,27 @@ public final class ContainerCache extends LRUMap {
/**
* Returns a DB handle if available, create the handler otherwise.
*
- * @param containerName - Name of the container.
+ * @param containerID - ID of the container.
* @return MetadataStore.
*/
- public MetadataStore getDB(String containerName, String containerDBPath)
+ public MetadataStore getDB(long containerID, String containerDBPath)
throws IOException {
- Preconditions.checkNotNull(containerName);
- Preconditions.checkState(!containerName.isEmpty());
+ Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
lock.lock();
try {
- MetadataStore db = (MetadataStore) this.get(containerName);
+ MetadataStore db = (MetadataStore) this.get(containerID);
if (db == null) {
db = MetadataStoreBuilder.newBuilder()
.setDbFile(new File(containerDBPath))
.setCreateIfMissing(false)
.build();
- this.put(containerName, db);
+ this.put(containerID, db);
}
return db;
} catch (Exception e) {
LOG.error("Error opening DB. Container:{} ContainerPath:{}",
- containerName, containerDBPath, e);
+ containerID, containerDBPath, e);
throw e;
} finally {
lock.unlock();
@@ -151,16 +150,15 @@ public final class ContainerCache extends LRUMap {
/**
* Remove a DB handler from cache.
*
- * @param containerName - Name of the container.
+ * @param containerID - ID of the container.
*/
- public void removeDB(String containerName) {
- Preconditions.checkNotNull(containerName);
- Preconditions.checkState(!containerName.isEmpty());
+ public void removeDB(long containerID) {
+ Preconditions.checkState(containerID >= 0, "Container ID cannot be negative.");
lock.lock();
try {
- MetadataStore db = (MetadataStore)this.get(containerName);
- closeDB(containerName, db);
- this.remove(containerName);
+ MetadataStore db = (MetadataStore)this.get(containerID);
+ closeDB(containerID, db);
+ this.remove(containerID);
} finally {
lock.unlock();
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index b1cdbc4913..d1d6488134 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -32,10 +32,10 @@ import static org.apache.hadoop.hdds.protocol.proto
public class CloseContainerCommand
extends SCMCommand<SCMCloseContainerCmdResponseProto> {
- private String containerName;
+ private long containerID;
- public CloseContainerCommand(String containerName) {
- this.containerName = containerName;
+ public CloseContainerCommand(long containerID) {
+ this.containerID = containerID;
}
/**
@@ -60,17 +60,17 @@ public class CloseContainerCommand
public SCMCloseContainerCmdResponseProto getProto() {
return SCMCloseContainerCmdResponseProto.newBuilder()
- .setContainerName(containerName).build();
+ .setContainerID(containerID).build();
}
public static CloseContainerCommand getFromProtobuf(
SCMCloseContainerCmdResponseProto closeContainerProto) {
Preconditions.checkNotNull(closeContainerProto);
- return new CloseContainerCommand(closeContainerProto.getContainerName());
+ return new CloseContainerCommand(closeContainerProto.getContainerID());
}
- public String getContainerName() {
- return containerName;
+ public long getContainerID() {
+ return containerID;
}
}