author    Anu Engineer <aengineer@apache.org>  2018-05-29 12:40:27 -0700
committer Anu Engineer <aengineer@apache.org>  2018-05-29 12:48:50 -0700
commit    201440b987d5ef3910c2045b2411c213ed6eec1f
tree      3cdd7fbc8baace9ca23feb82a842dc200356dbbf /hadoop-hdds/container-service/src/main/java/org/apache
parent    4827e9a9085b306bc379cb6e0b1fe4b92326edcd
HDDS-81. Moving ContainerReport inside Datanode heartbeat.
Contributed by Nanda Kumar.
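
After this patch the datanode builds a single SCMHeartbeatRequestProto and hands it to SCM, instead of passing DatanodeDetails and the node report as separate RPC arguments. A minimal sketch of the new call shape, mirroring the HeartbeatEndpointTask hunk further down (datanodeDetailsProto, context and rpcEndpoint are fields of that class):

// Sketch only: follows the updated HeartbeatEndpointTask.call() in this patch.
SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
    .setDatanodeDetails(datanodeDetailsProto)   // HddsProtos.DatanodeDetailsProto
    .setNodeReport(context.getNodeReport())     // NodeReportProto (formerly SCMNodeReport)
    .build();

SCMHeartbeatResponseProto response =
    rpcEndpoint.getEndPoint().sendHeartbeat(request);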
Diffstat (limited to 'hadoop-hdds/container-service/src/main/java/org/apache')
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java | 22
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java | 8
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java | 8
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java | 7
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java | 16
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java (renamed from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java) | 20
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java | 5
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java | 8
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java | 12
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java | 30
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java | 12
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java | 10
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java | 30
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java | 15
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java | 18
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java | 18
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java | 26
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java | 16
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java | 4
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java | 50
-rw-r--r--  hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java | 53
21 files changed, 165 insertions, 223 deletions
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 9355364eac..af47015324 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,11 +35,11 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+ .StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -854,11 +854,11 @@ public class ContainerManagerImpl implements ContainerManager {
* @return node report.
*/
@Override
- public SCMNodeReport getNodeReport() throws IOException {
+ public NodeReportProto getNodeReport() throws IOException {
StorageLocationReport[] reports = locationManager.getLocationReport();
- SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder();
+ NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
for (int i = 0; i < reports.length; i++) {
- SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+ StorageReportProto.Builder srb = StorageReportProto.newBuilder();
nrb.addStorageReport(reports[i].getProtoBufMessage());
}
return nrb.build();
@@ -891,7 +891,7 @@ public class ContainerManagerImpl implements ContainerManager {
* @throws IOException
*/
@Override
- public ContainerReportsRequestProto getContainerReport() throws IOException {
+ public ContainerReportsProto getContainerReport() throws IOException {
LOG.debug("Starting container report iteration.");
// No need for locking since containerMap is a ConcurrentSkipListMap
// And we can never get the exact state since close might happen
@@ -899,12 +899,8 @@ public class ContainerManagerImpl implements ContainerManager {
List<ContainerData> containers = containerMap.values().stream()
.collect(Collectors.toList());
- ContainerReportsRequestProto.Builder crBuilder =
- ContainerReportsRequestProto.newBuilder();
-
- // TODO: support delta based container report
- crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage())
- .setType(ContainerReportsRequestProto.reportType.fullReport);
+ ContainerReportsProto.Builder crBuilder =
+ ContainerReportsProto.newBuilder();
for (ContainerData container: containers) {
long containerId = container.getContainerID();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index a5ad6c2b5e..87b965679d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.common.impl;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.protocol.proto.
- StorageContainerDatanodeProtocolProtos.SCMStorageReport;
+ StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.
StorageContainerDatanodeProtocolProtos.StorageTypeProto;
@@ -137,8 +137,8 @@ public class StorageLocationReport {
* @return SCMStorageReport
* @throws IOException In case, the storage type specified is invalid.
*/
- public SCMStorageReport getProtoBufMessage() throws IOException{
- SCMStorageReport.Builder srb = SCMStorageReport.newBuilder();
+ public StorageReportProto getProtoBufMessage() throws IOException{
+ StorageReportProto.Builder srb = StorageReportProto.newBuilder();
return srb.setStorageUuid(getId())
.setCapacity(getCapacity())
.setScmUsed(getScmUsed())
@@ -156,7 +156,7 @@ public class StorageLocationReport {
* @throws IOException in case of invalid storage type
*/
- public static StorageLocationReport getFromProtobuf(SCMStorageReport report)
+ public static StorageLocationReport getFromProtobuf(StorageReportProto report)
throws IOException {
StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
builder.setId(report.getStorageUuid())
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
index ba70953710..49b68dc2a0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
@@ -27,9 +27,9 @@ import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.util.RwLock;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import java.io.IOException;
@@ -171,14 +171,14 @@ public interface ContainerManager extends RwLock {
* Get the Node Report of container storage usage.
* @return node report.
*/
- SCMNodeReport getNodeReport() throws IOException;
+ NodeReportProto getNodeReport() throws IOException;
/**
* Gets container report.
* @return container report.
* @throws IOException
*/
- ContainerReportsRequestProto getContainerReport() throws IOException;
+ ContainerReportsProto getContainerReport() throws IOException;
/**
* Gets container reports.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index a8fe4949ae..d0a4217245 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -21,8 +21,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
- .CloseContainerHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
.CommandDispatcher;
import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
@@ -86,7 +85,7 @@ public class DatanodeStateMachine implements Closeable {
// When we add new handlers just adding a new handler here should do the
// trick.
commandDispatcher = CommandDispatcher.newBuilder()
- .addHandler(new CloseContainerHandler())
+ .addHandler(new CloseContainerCommandHandler())
.addHandler(new DeleteBlocksCommandHandler(
container.getContainerManager(), conf))
.setConnectionManager(connectionManager)
@@ -131,7 +130,7 @@ public class DatanodeStateMachine implements Closeable {
try {
LOG.debug("Executing cycle Number : {}", context.getExecutionCount());
nextHB.set(Time.monotonicNow() + heartbeatFrequency);
- context.setReportState(container.getNodeReport());
+ context.setNodeReport(container.getNodeReport());
context.execute(executorService, heartbeatFrequency,
TimeUnit.MILLISECONDS);
now = Time.monotonicNow();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 27eb57e456..4e3c610f77 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.statemachine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.ozone.container.common.states.DatanodeState;
import org.apache.hadoop.ozone.container.common.states.datanode
.InitDatanodeState;
@@ -52,7 +52,7 @@ public class StateContext {
private final AtomicLong stateExecutionCount;
private final Configuration conf;
private DatanodeStateMachine.DatanodeStates state;
- private SCMNodeReport nrState;
+ private NodeReportProto dnReport;
/**
* Constructs a StateContext.
@@ -69,7 +69,7 @@ public class StateContext {
commandQueue = new LinkedList<>();
lock = new ReentrantLock();
stateExecutionCount = new AtomicLong(0);
- nrState = SCMNodeReport.getDefaultInstance();
+ dnReport = NodeReportProto.getDefaultInstance();
}
/**
@@ -144,16 +144,16 @@ public class StateContext {
* Returns the node report of the datanode state context.
* @return the node report.
*/
- public SCMNodeReport getNodeReport() {
- return nrState;
+ public NodeReportProto getNodeReport() {
+ return dnReport;
}
/**
* Sets the storage location report of the datanode state context.
- * @param nrReport - node report
+ * @param nodeReport node report
*/
- public void setReportState(SCMNodeReport nrReport) {
- this.nrState = nrReport;
+ public void setNodeReport(NodeReportProto nodeReport) {
+ this.dnReport = nodeReport;
}
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
index d8adc7df0f..e8c602d1ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerCommandHandler.java
@@ -17,9 +17,9 @@
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+ .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
import org.apache.hadoop.ozone.container.common.statemachine
.SCMConnectionManager;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
@@ -30,18 +30,18 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Container Report handler.
+ * Handler for close container command received from SCM.
*/
-public class CloseContainerHandler implements CommandHandler {
+public class CloseContainerCommandHandler implements CommandHandler {
static final Logger LOG =
- LoggerFactory.getLogger(CloseContainerHandler.class);
+ LoggerFactory.getLogger(CloseContainerCommandHandler.class);
private int invocationCount;
private long totalTime;
/**
* Constructs a ContainerReport handler.
*/
- public CloseContainerHandler() {
+ public CloseContainerCommandHandler() {
}
/**
@@ -62,9 +62,9 @@ public class CloseContainerHandler implements CommandHandler {
long containerID = -1;
try {
- SCMCloseContainerCmdResponseProto
+ CloseContainerCommandProto
closeContainerProto =
- SCMCloseContainerCmdResponseProto
+ CloseContainerCommandProto
.parseFrom(command.getProtoBufMessage());
containerID = closeContainerProto.getContainerID();
@@ -84,8 +84,8 @@ public class CloseContainerHandler implements CommandHandler {
* @return Type
*/
@Override
- public SCMCmdType getCommandType() {
- return SCMCmdType.closeContainerCommand;
+ public SCMCommandProto.Type getCommandType() {
+ return SCMCommandProto.Type.closeContainerCommand;
}
/**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
index 40feca32bd..aedd78fe4b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java
@@ -18,7 +18,8 @@
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
@@ -38,7 +39,7 @@ public final class CommandDispatcher {
static final Logger LOG =
LoggerFactory.getLogger(CommandDispatcher.class);
private final StateContext context;
- private final Map<SCMCmdType, CommandHandler> handlerMap;
+ private final Map<Type, CommandHandler> handlerMap;
private final OzoneContainer container;
private final SCMConnectionManager connectionManager;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
index 13d9f7295d..60e2dc479d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java
@@ -17,8 +17,10 @@
package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.ozone.container.common.statemachine
+ .SCMConnectionManager;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -42,7 +44,7 @@ public interface CommandHandler {
* Returns the command type that this command handler handles.
* @return Type
*/
- SCMCmdType getCommandType();
+ SCMCommandProto.Type getCommandType();
/**
* Returns number of times this handler has been invoked.
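
The SCMCmdType enum is gone; command handlers and the dispatcher now key on SCMCommandProto.Type. A hypothetical handler fragment sketched against the updated interface (the class name and the guarded command type are illustrative; the remaining CommandHandler methods, such as the invocation count, are omitted):

// Illustrative fragment, not part of this patch.
public class ExampleCommandHandler implements CommandHandler {

  @Override
  public void handle(SCMCommand command, OzoneContainer container,
      StateContext context, SCMConnectionManager connectionManager) {
    // Guard on the new enum, as DeleteBlocksCommandHandler does below.
    if (command.getType() != SCMCommandProto.Type.closeContainerCommand) {
      return;
    }
    // ... handle the command ...
  }

  @Override
  public SCMCommandProto.Type getCommandType() {
    return SCMCommandProto.Type.closeContainerCommand;
  }
}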
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index 5231660b6d..ab69bdc38a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
import com.google.common.primitives.Longs;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
@@ -26,8 +28,6 @@ import org.apache.hadoop.hdds.protocol.proto
.DeleteBlockTransactionResult;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.helpers
@@ -73,10 +73,10 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
@Override
public void handle(SCMCommand command, OzoneContainer container,
StateContext context, SCMConnectionManager connectionManager) {
- if (command.getType() != SCMCmdType.deleteBlocksCommand) {
+ if (command.getType() != SCMCommandProto.Type.deleteBlocksCommand) {
LOG.warn("Skipping handling command, expected command "
+ "type {} but found {}",
- SCMCmdType.deleteBlocksCommand, command.getType());
+ SCMCommandProto.Type.deleteBlocksCommand, command.getType());
return;
}
LOG.debug("Processing block deletion command.");
@@ -193,8 +193,8 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
}
@Override
- public SCMCmdType getCommandType() {
- return SCMCmdType.deleteBlocksCommand;
+ public SCMCommandProto.Type getCommandType() {
+ return SCMCommandProto.Type.deleteBlocksCommand;
}
@Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index 01b4c72428..337cdfbcf8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -23,7 +23,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
import org.apache.hadoop.ozone.container.common.helpers
@@ -97,8 +99,13 @@ public class HeartbeatEndpointTask
try {
Preconditions.checkState(this.datanodeDetailsProto != null);
+ SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
+ .setDatanodeDetails(datanodeDetailsProto)
+ .setNodeReport(context.getNodeReport())
+ .build();
+
SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
- .sendHeartbeat(datanodeDetailsProto, this.context.getNodeReport());
+ .sendHeartbeat(request);
processResponse(reponse, datanodeDetailsProto);
rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
rpcEndpoint.zeroMissedCount();
@@ -125,13 +132,13 @@ public class HeartbeatEndpointTask
*/
private void processResponse(SCMHeartbeatResponseProto response,
final DatanodeDetailsProto datanodeDetails) {
- for (SCMCommandResponseProto commandResponseProto : response
+ Preconditions.checkState(response.getDatanodeUUID()
+ .equalsIgnoreCase(datanodeDetails.getUuid()),
+ "Unexpected datanode ID in the response.");
+ // Verify the response is indeed for this datanode.
+ for (SCMCommandProto commandResponseProto : response
.getCommandsList()) {
- // Verify the response is indeed for this datanode.
- Preconditions.checkState(commandResponseProto.getDatanodeUUID()
- .equalsIgnoreCase(datanodeDetails.getUuid()),
- "Unexpected datanode ID in the response.");
- switch (commandResponseProto.getCmdType()) {
+ switch (commandResponseProto.getCommandType()) {
case reregisterCommand:
if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) {
if (LOG.isDebugEnabled()) {
@@ -148,7 +155,8 @@ public class HeartbeatEndpointTask
break;
case deleteBlocksCommand:
DeleteBlocksCommand db = DeleteBlocksCommand
- .getFromProtobuf(commandResponseProto.getDeleteBlocksProto());
+ .getFromProtobuf(
+ commandResponseProto.getDeleteBlocksCommandProto());
if (!db.blocksTobeDeleted().isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug(DeletedContainerBlocksSummary
@@ -161,7 +169,7 @@ public class HeartbeatEndpointTask
case closeContainerCommand:
CloseContainerCommand closeContainer =
CloseContainerCommand.getFromProtobuf(
- commandResponseProto.getCloseContainerProto());
+ commandResponseProto.getCloseContainerCommandProto());
if (LOG.isDebugEnabled()) {
LOG.debug("Received SCM container close request for container {}",
closeContainer.getContainerID());
@@ -170,7 +178,7 @@ public class HeartbeatEndpointTask
break;
default:
throw new IllegalArgumentException("Unknown response : "
- + commandResponseProto.getCmdType().name());
+ + commandResponseProto.getCommandType().name());
}
}
}
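
HeartbeatEndpointTask now validates the datanode UUID once per heartbeat response and reads commands as SCMCommandProto, switching on getCommandType() instead of the removed getCmdType(). A condensed sketch of the reworked processResponse() flow; the addCommand() call on StateContext is assumed from the surrounding class and is not shown in the hunk above:

// Condensed from the updated processResponse() in this patch.
Preconditions.checkState(
    response.getDatanodeUUID().equalsIgnoreCase(datanodeDetails.getUuid()),
    "Unexpected datanode ID in the response.");
for (SCMCommandProto commandProto : response.getCommandsList()) {
  switch (commandProto.getCommandType()) {
  case closeContainerCommand:
    CloseContainerCommand closeContainer = CloseContainerCommand.getFromProtobuf(
        commandProto.getCloseContainerCommandProto());
    context.addCommand(closeContainer);  // assumed queueing call, not shown in the hunk
    break;
  default:
    throw new IllegalArgumentException(
        "Unknown response : " + commandProto.getCommandType().name());
  }
}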
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 77a70843a8..12b48abb10 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -24,11 +24,11 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.container.common.statemachine
.EndpointStateMachine;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -104,11 +104,11 @@ public final class RegisterEndpointTask implements
rpcEndPoint.lock();
try {
- ContainerReportsRequestProto contianerReport = datanodeContainerManager
+ ContainerReportsProto contianerReport = datanodeContainerManager
.getContainerReport();
- SCMNodeReport nodeReport = datanodeContainerManager.getNodeReport();
+ NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
// TODO : Add responses to the command Queue.
- SCMRegisteredCmdResponseProto response = rpcEndPoint.getEndPoint()
+ SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
.register(datanodeDetails.getProtoBufMessage(), nodeReport,
contianerReport);
Preconditions.checkState(UUID.fromString(response.getDatanodeUUID())
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 6758479077..b357fefb79 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -19,14 +19,14 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
@@ -219,7 +219,7 @@ public class OzoneContainer {
/**
* Returns node report of container storage usage.
*/
- public SCMNodeReport getNodeReport() throws IOException {
+ public NodeReportProto getNodeReport() throws IOException {
return this.manager.getNodeReport();
}
@@ -255,7 +255,7 @@ public class OzoneContainer {
* @return - container report.
* @throws IOException
*/
- public ContainerReportsRequestProto getContainerReport() throws IOException {
+ public ContainerReportsProto getContainerReport() throws IOException {
return this.manager.getContainerReport();
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index e2a3bf5fcc..a950a3144a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -19,20 +19,20 @@ package org.apache.hadoop.ozone.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos
.ContainerBlocksDeletionACKResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -55,13 +55,12 @@ public interface StorageContainerDatanodeProtocol {
/**
* Used by data node to send a Heartbeat.
- * @param datanodeDetails - Datanode Details.
- * @param nodeReport - node report state
+ * @param heartbeat Heartbeat
* @return - SCMHeartbeatResponseProto
* @throws IOException
*/
- SCMHeartbeatResponseProto sendHeartbeat(DatanodeDetailsProto datanodeDetails,
- SCMNodeReport nodeReport) throws IOException;
+ SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat)
+ throws IOException;
/**
* Register Datanode.
@@ -70,20 +69,11 @@ public interface StorageContainerDatanodeProtocol {
* @param containerReportsRequestProto - Container Reports.
* @return SCM Command.
*/
- SCMRegisteredCmdResponseProto register(DatanodeDetailsProto datanodeDetails,
- SCMNodeReport nodeReport, ContainerReportsRequestProto
+ SCMRegisteredResponseProto register(DatanodeDetailsProto datanodeDetails,
+ NodeReportProto nodeReport, ContainerReportsProto
containerReportsRequestProto) throws IOException;
/**
- * Send a container report.
- * @param reports -- Container report.
- * @return container reports response.
- * @throws IOException
- */
- ContainerReportsResponseProto sendContainerReport(
- ContainerReportsRequestProto reports) throws IOException;
-
- /**
* Used by datanode to send block deletion ACK to SCM.
* @param request block deletion transactions.
* @return block deletion transaction response.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index 14038fb094..790f58acf2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -18,11 +18,12 @@
package org.apache.hadoop.ozone.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import java.util.List;
@@ -49,11 +50,11 @@ public interface StorageContainerNodeProtocol {
/**
* Register the node if the node finds that it is not registered with any SCM.
* @param datanodeDetails DatanodeDetails
- * @param nodeReport SCMNodeReport
+ * @param nodeReport NodeReportProto
* @return SCMHeartbeatResponseProto
*/
- SCMCommand register(DatanodeDetailsProto datanodeDetails, SCMNodeReport
- nodeReport);
+ RegisteredCommand register(DatanodeDetails datanodeDetails,
+ NodeReportProto nodeReport);
/**
* Send heartbeat to indicate the datanode is alive and doing well.
@@ -61,7 +62,7 @@ public interface StorageContainerNodeProtocol {
* @param nodeReport - node report.
* @return SCMheartbeat response list
*/
- List<SCMCommand> sendHeartbeat(DatanodeDetailsProto datanodeDetails,
- SCMNodeReport nodeReport);
+ List<SCMCommand> sendHeartbeat(DatanodeDetails datanodeDetails,
+ NodeReportProto nodeReport);
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
index d1d6488134..4f4f82b600 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java
@@ -19,18 +19,16 @@ package org.apache.hadoop.ozone.protocol.commands;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+ .StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
-import static org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
/**
* Asks datanode to close a container.
*/
public class CloseContainerCommand
- extends SCMCommand<SCMCloseContainerCmdResponseProto> {
+ extends SCMCommand<CloseContainerCommandProto> {
private long containerID;
@@ -44,8 +42,8 @@ public class CloseContainerCommand
* @return Type
*/
@Override
- public SCMCmdType getType() {
- return closeContainerCommand;
+ public SCMCommandProto.Type getType() {
+ return SCMCommandProto.Type.closeContainerCommand;
}
/**
@@ -58,13 +56,13 @@ public class CloseContainerCommand
return getProto().toByteArray();
}
- public SCMCloseContainerCmdResponseProto getProto() {
- return SCMCloseContainerCmdResponseProto.newBuilder()
+ public CloseContainerCommandProto getProto() {
+ return CloseContainerCommandProto.newBuilder()
.setContainerID(containerID).build();
}
public static CloseContainerCommand getFromProtobuf(
- SCMCloseContainerCmdResponseProto closeContainerProto) {
+ CloseContainerCommandProto closeContainerProto) {
Preconditions.checkNotNull(closeContainerProto);
return new CloseContainerCommand(closeContainerProto.getContainerID());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
index a11ca25a30..4fa33f68b0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java
@@ -18,11 +18,11 @@
package org.apache.hadoop.ozone.protocol.commands;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+ .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMDeleteBlocksCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.DeleteBlocksCommandProto;
import java.util.List;
@@ -30,7 +30,7 @@ import java.util.List;
* A SCM command asks a datanode to delete a number of blocks.
*/
public class DeleteBlocksCommand extends
- SCMCommand<SCMDeleteBlocksCmdResponseProto> {
+ SCMCommand<DeleteBlocksCommandProto> {
private List<DeletedBlocksTransaction> blocksTobeDeleted;
@@ -44,8 +44,8 @@ public class DeleteBlocksCommand extends
}
@Override
- public SCMCmdType getType() {
- return SCMCmdType.deleteBlocksCommand;
+ public SCMCommandProto.Type getType() {
+ return SCMCommandProto.Type.deleteBlocksCommand;
}
@Override
@@ -54,13 +54,13 @@ public class DeleteBlocksCommand extends
}
public static DeleteBlocksCommand getFromProtobuf(
- SCMDeleteBlocksCmdResponseProto deleteBlocksProto) {
+ DeleteBlocksCommandProto deleteBlocksProto) {
return new DeleteBlocksCommand(deleteBlocksProto
.getDeletedBlocksTransactionsList());
}
- public SCMDeleteBlocksCmdResponseProto getProto() {
- return SCMDeleteBlocksCmdResponseProto.newBuilder()
+ public DeleteBlocksCommandProto getProto() {
+ return DeleteBlocksCommandProto.newBuilder()
.addAllDeletedBlocksTransactions(blocksTobeDeleted).build();
}
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
index 69f2c186fa..3a5da72f48 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java
@@ -19,18 +19,15 @@ package org.apache.hadoop.ozone.protocol.commands;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+ .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
+ .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
.ErrorCode;
/**
* Response to Datanode Register call.
*/
-public class RegisteredCommand extends
- SCMCommand<SCMRegisteredCmdResponseProto> {
+public class RegisteredCommand {
private String datanodeUUID;
private String clusterID;
private ErrorCode error;
@@ -60,16 +57,6 @@ public class RegisteredCommand extends
}
/**
- * Returns the type of this command.
- *
- * @return Type
- */
- @Override
- public SCMCmdType getType() {
- return SCMCmdType.registeredCommand;
- }
-
- /**
* Returns datanode UUID.
*
* @return - Datanode ID.
@@ -117,10 +104,9 @@ public class RegisteredCommand extends
*
* @return A protobuf message.
*/
- @Override
public byte[] getProtoBufMessage() {
- SCMRegisteredCmdResponseProto.Builder builder =
- SCMRegisteredCmdResponseProto.newBuilder()
+ SCMRegisteredResponseProto.Builder builder =
+ SCMRegisteredResponseProto.newBuilder()
.setClusterID(this.clusterID)
.setDatanodeUUID(this.datanodeUUID)
.setErrorCode(this.error);
@@ -157,7 +143,7 @@ public class RegisteredCommand extends
* @param response - RegisteredCmdResponseProto
* @return RegisteredCommand
*/
- public RegisteredCommand getFromProtobuf(SCMRegisteredCmdResponseProto
+ public RegisteredCommand getFromProtobuf(SCMRegisteredResponseProto
response) {
Preconditions.checkNotNull(response);
if (response.hasHostname() && response.hasIpAddress()) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
index c167d59ddc..953e31a02e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java
@@ -18,18 +18,16 @@
package org.apache.hadoop.ozone.protocol.commands;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import static org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;
-import static org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.ReregisterCommandProto;
/**
* Informs a datanode to register itself with SCM again.
*/
public class ReregisterCommand extends
- SCMCommand<SCMReregisterCmdResponseProto>{
+ SCMCommand<ReregisterCommandProto>{
/**
* Returns the type of this command.
@@ -37,8 +35,8 @@ public class ReregisterCommand extends
* @return Type
*/
@Override
- public SCMCmdType getType() {
- return reregisterCommand;
+ public SCMCommandProto.Type getType() {
+ return SCMCommandProto.Type.reregisterCommand;
}
/**
@@ -51,8 +49,8 @@ public class ReregisterCommand extends
return getProto().toByteArray();
}
- public SCMReregisterCmdResponseProto getProto() {
- return SCMReregisterCmdResponseProto
+ public ReregisterCommandProto getProto() {
+ return ReregisterCommandProto
.newBuilder()
.build();
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
index 73e4194d8c..35ca802bee 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.protocol.commands;
import com.google.protobuf.GeneratedMessage;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMCmdType;
+ .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
/**
* A class that acts as the base class to convert between Java and SCM
@@ -31,7 +31,7 @@ public abstract class SCMCommand<T extends GeneratedMessage> {
* Returns the type of this command.
* @return Type
*/
- public abstract SCMCmdType getType();
+ public abstract SCMCommandProto.Type getType();
/**
* Gets the protobuf message of this object.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index a56c57a332..40fe189600 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -20,24 +20,23 @@ import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos
.ContainerBlocksDeletionACKResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -123,22 +122,16 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
/**
* Send by datanode to SCM.
*
- * @param datanodeDetailsProto - Datanode Details
- * @param nodeReport - node report
+ * @param heartbeat node heartbeat
* @throws IOException
*/
@Override
public SCMHeartbeatResponseProto sendHeartbeat(
- DatanodeDetailsProto datanodeDetailsProto,
- SCMNodeReport nodeReport) throws IOException {
- SCMHeartbeatRequestProto.Builder req = SCMHeartbeatRequestProto
- .newBuilder();
- req.setDatanodeDetails(datanodeDetailsProto);
- req.setNodeReport(nodeReport);
+ SCMHeartbeatRequestProto heartbeat) throws IOException {
final SCMHeartbeatResponseProto resp;
try {
- resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, req.build());
+ resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, heartbeat);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@@ -154,16 +147,16 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
* @return SCM Command.
*/
@Override
- public SCMRegisteredCmdResponseProto register(
- DatanodeDetailsProto datanodeDetailsProto, SCMNodeReport nodeReport,
- ContainerReportsRequestProto containerReportsRequestProto)
+ public SCMRegisteredResponseProto register(
+ DatanodeDetailsProto datanodeDetailsProto, NodeReportProto nodeReport,
+ ContainerReportsProto containerReportsRequestProto)
throws IOException {
SCMRegisterRequestProto.Builder req =
SCMRegisterRequestProto.newBuilder();
req.setDatanodeDetails(datanodeDetailsProto);
req.setContainerReport(containerReportsRequestProto);
req.setNodeReport(nodeReport);
- final SCMRegisteredCmdResponseProto response;
+ final SCMRegisteredResponseProto response;
try {
response = rpcProxy.register(NULL_RPC_CONTROLLER, req.build());
} catch (ServiceException e) {
@@ -172,25 +165,6 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
return response;
}
- /**
- * Send a container report.
- *
- * @param reports -- Container report
- * @return HeartbeatRespose.nullcommand.
- * @throws IOException
- */
- @Override
- public ContainerReportsResponseProto sendContainerReport(
- ContainerReportsRequestProto reports) throws IOException {
- final ContainerReportsResponseProto resp;
- try {
- resp = rpcProxy.sendContainerReport(NULL_RPC_CONTROLLER, reports);
- } catch (ServiceException e) {
- throw ProtobufHelper.getRemoteException(e);
- }
- return resp;
- }
-
@Override
public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(
ContainerBlocksDeletionACKProto deletedBlocks) throws IOException {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 07dba572ef..7e8bd8a2ac 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -19,18 +19,22 @@ package org.apache.hadoop.ozone.protocolPB;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
+ .StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+ .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos
- .ContainerBlocksDeletionACKResponseProto;
+ .StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
+ .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
+ .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos
+ .ContainerBlocksDeletionACKResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -55,9 +59,8 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
}
@Override
- public StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto
- getVersion(RpcController controller,
- StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto request)
+ public SCMVersionResponseProto getVersion(RpcController controller,
+ SCMVersionRequestProto request)
throws ServiceException {
try {
return impl.getVersion(request);
@@ -67,15 +70,13 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
}
@Override
- public StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
- register(RpcController controller, StorageContainerDatanodeProtocolProtos
- .SCMRegisterRequestProto request) throws ServiceException {
+ public SCMRegisteredResponseProto register(RpcController controller,
+ SCMRegisterRequestProto request) throws ServiceException {
try {
- ContainerReportsRequestProto containerRequestProto = null;
- SCMNodeReport scmNodeReport = null;
- containerRequestProto = request.getContainerReport();
- scmNodeReport = request.getNodeReport();
- return impl.register(request.getDatanodeDetails(), scmNodeReport,
+ ContainerReportsProto containerRequestProto = request
+ .getContainerReport();
+ NodeReportProto dnNodeReport = request.getNodeReport();
+ return impl.register(request.getDatanodeDetails(), dnNodeReport,
containerRequestProto);
} catch (IOException e) {
throw new ServiceException(e);
@@ -83,27 +84,15 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
}
@Override
- public SCMHeartbeatResponseProto
- sendHeartbeat(RpcController controller,
+ public SCMHeartbeatResponseProto sendHeartbeat(RpcController controller,
SCMHeartbeatRequestProto request) throws ServiceException {
try {
- return impl.sendHeartbeat(request.getDatanodeDetails(),
- request.getNodeReport());
+ return impl.sendHeartbeat(request);
} catch (IOException e) {
throw new ServiceException(e);
}
}
- @Override
- public ContainerReportsResponseProto sendContainerReport(
- RpcController controller, ContainerReportsRequestProto request)
- throws ServiceException {
- try {
- return impl.sendContainerReport(request);
- } catch (IOException e) {
- throw new ServiceException(e);
- }
- }
@Override
public ContainerBlocksDeletionACKResponseProto sendContainerBlocksDeletionACK(