| author | Anu Engineer <aengineer@apache.org> | 2018-04-05 11:24:39 -0700 |
|---|---|---|
| committer | Owen O'Malley <omalley@apache.org> | 2018-04-26 05:36:04 -0700 |
| commit | 8b832f3c3556ef3f970bac636ef9c70ee9dd260d | |
| tree | 82ad3bdf945504bf753f3dca20cc41077c2b19c4 | |
| parent | 792ac4d08bfdef2ea66630a72bac3a483fe2a547 | |
HDFS-13405. Ozone: Rename HDSL to HDDS.
Contributed by Ajay Kumar, Elek Marton, Mukul Kumar Singh, Shashikant Banerjee and Anu Engineer.
487 files changed, 3917 insertions, 2716 deletions
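The change is a mechanical rename: Maven artifacts move from `hadoop-hdsl-*` to `hadoop-hdds-*`, and Java packages move from `org.apache.hadoop.hdsl.*` and `org.apache.hadoop.scm.*` to `org.apache.hadoop.hdds.*` and `org.apache.hadoop.hdds.scm.*`. As a rough illustration (not part of this commit), the minimal Java sketch below uses a few of the renamed classes that appear in the hunks that follow; it assumes a build that already contains this change, with `hadoop-hdds-common` on the classpath, and the class name `HddsRenameExample` is made up for the example.

```java
// Hypothetical sketch of code written against the post-rename packages.
// The classes below appear in the diff; only their packages/names changed.
import org.apache.hadoop.hdds.conf.OzoneConfiguration;   // formerly org.apache.hadoop.hdsl.conf.OzoneConfiguration
import org.apache.hadoop.hdds.protocol.proto.HddsProtos; // formerly org.apache.hadoop.hdsl.protocol.proto.HdslProtos

public class HddsRenameExample {
  public static void main(String[] args) {
    // OzoneConfiguration keeps its class name; only its package changed.
    OzoneConfiguration conf = new OzoneConfiguration();

    // The generated protobuf wrapper was renamed HdslProtos -> HddsProtos;
    // nested enums such as ReplicationType and ReplicationFactor keep their values.
    HddsProtos.ReplicationType type = HddsProtos.ReplicationType.STAND_ALONE;
    HddsProtos.ReplicationFactor factor = HddsProtos.ReplicationFactor.ONE;

    System.out.println("type=" + type + ", factor=" + factor
        + ", ozone.enabled=" + conf.get("ozone.enabled"));
  }
}
```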
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml index bafa12b80d..634c526a40 100644 --- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml @@ -51,7 +51,7 @@ <exclude>**/file:/**</exclude> <exclude>**/SecurityAuth.audit*</exclude> <exclude>hadoop-ozone/**</exclude> - <exclude>hadoop-hdsl/**</exclude> + <exclude>hadoop-hdds/**</exclude> <exclude>hadoop-cblock/**</exclude> </excludes> </fileSet> diff --git a/hadoop-cblock/server/pom.xml b/hadoop-cblock/server/pom.xml index 3fa197d367..8039dad072 100644 --- a/hadoop-cblock/server/pom.xml +++ b/hadoop-cblock/server/pom.xml @@ -37,7 +37,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> </dependency> <dependency> @@ -47,12 +47,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-client</artifactId> + <artifactId>hadoop-hdds-client</artifactId> </dependency> <dependency> @@ -132,7 +132,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ </param> <param> - ${basedir}/../../hadoop-hdsl/common/src/main/proto/ + ${basedir}/../../hadoop-hdds/common/src/main/proto/ </param> <param>${basedir}/src/main/proto</param> </imports> diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockManager.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockManager.java index 12b505a90c..9318b6c864 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockManager.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CBlockManager.java @@ -37,24 +37,24 @@ import org.apache.hadoop.cblock.protocolPB import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.client.ContainerOperationClient; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.cblock.storage.StorageManager; import org.apache.hadoop.cblock.util.KeyUtil; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.utils.LevelDBStore; import static org.apache.hadoop.cblock.CblockUtils.getCblockServerRpcAddr; import static org.apache.hadoop.cblock.CblockUtils.getCblockServiceRpcAddr; -import static 
org.apache.hadoop.hdsl.server.ServerUtils +import static org.apache.hadoop.hdds.server.ServerUtils .updateRPCListenAddress; import org.iq80.leveldb.DBIterator; import org.slf4j.Logger; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CblockUtils.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CblockUtils.java index 99ffde0b14..f0f1d05a49 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CblockUtils.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/CblockUtils.java @@ -36,8 +36,8 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys .DFS_CBLOCK_SERVICERPC_HOSTNAME_DEFAULT; import static org.apache.hadoop.cblock.CBlockConfigKeys .DFS_CBLOCK_SERVICERPC_PORT_DEFAULT; -import static org.apache.hadoop.hdsl.HdslUtils.getHostNameFromConfigKeys; -import static org.apache.hadoop.hdsl.HdslUtils.getPortNumberFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; /** * Generic stateless utility functions for CBlock components. diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java index 4a3878abf1..9227a281d6 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/client/CBlockVolumeClient.java @@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.cblock.CBlockConfigKeys; import org.apache.hadoop.cblock.meta.VolumeInfo; import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java index 04fe3a4544..f2d289e907 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/BlockWriterTask.java @@ -21,9 +21,9 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.LevelDBStore; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java index 84b68e35e0..0b6c5f31df 100644 --- 
a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockClientProtocolClientSideTranslatorPB.java @@ -39,7 +39,7 @@ import org.apache.hadoop.cblock.protocolPB.CBlockClientServerProtocolPB; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java index 2f35668889..4744968b6d 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockIStorageImpl.java @@ -23,8 +23,8 @@ import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.jscsi.target.storage.IStorageModule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java index 75e013e4a5..afbd2606b3 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/CBlockTargetServer.java @@ -20,8 +20,8 @@ package org.apache.hadoop.cblock.jscsiHelper; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.util.KeyUtil; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.scm.XceiverClientManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.jscsi.target.Configuration; import org.jscsi.target.Target; import org.jscsi.target.TargetServer; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java index 292662e6d0..171f3e265e 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/ContainerCacheFlusher.java @@ -26,8 +26,8 @@ import org.apache.hadoop.cblock.jscsiHelper.cache.impl.AsyncBlockWriter; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.DiskBlock; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import 
org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.LevelDBStore; import org.iq80.leveldb.Options; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java index f164f38134..3806d8ba0f 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/SCSITargetDaemon.java @@ -31,9 +31,9 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; import org.apache.hadoop.security.UserGroupInformation; import org.jscsi.target.Configuration; @@ -47,14 +47,14 @@ import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_DE import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_PORT_KEY; import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_DEFAULT; import static org.apache.hadoop.cblock.CBlockConfigKeys.DFS_CBLOCK_JSCSI_SERVER_ADDRESS_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; /** * This class runs the target server process. 
diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java index 992578fa83..0192c3859f 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/AsyncBlockWriter.java @@ -22,10 +22,10 @@ import com.google.common.primitives.Longs; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.LevelDBStore; import org.slf4j.Logger; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java index 1149164f94..ec5a4c98b5 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/CBlockLocalCache.java @@ -24,8 +24,8 @@ import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.cache.CacheModule; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.utils.LevelDBStore; import org.slf4j.Logger; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java index 7d6e1728d2..557b201042 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/jscsiHelper/cache/impl/SyncBlockReader.java @@ -22,10 +22,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor; import org.apache.hadoop.utils.LevelDBStore; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java index 72b215b9b3..e21966b41e 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/kubernetes/DynamicProvisioner.java @@ -38,7 +38,7 @@ import org.apache.hadoop.cblock.CblockUtils; import org.apache.hadoop.cblock.exception.CBlockException; import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.storage.StorageManager; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java index 00064a6dc5..2c31224257 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/ContainerDescriptor.java @@ -19,7 +19,7 @@ package org.apache.hadoop.cblock.meta; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; /** * diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java index 4f5930d98c..930741db17 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/meta/VolumeDescriptor.java @@ -19,7 +19,7 @@ package org.apache.hadoop.cblock.meta; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java index 70ccd90e72..d33337f1f9 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/proto/MountVolumeResponse.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.cblock.proto; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.util.HashMap; import java.util.List; diff --git 
a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java index bfe2130822..f937a738c1 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/protocolPB/CBlockClientServerProtocolServerSideTranslatorPB.java @@ -25,7 +25,7 @@ import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.protocol.proto.CBlockClientServerProtocolProtos; import org.apache.hadoop.cblock.protocol.proto.CBlockServiceProtocolProtos; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.io.IOException; import java.util.HashMap; diff --git a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java index b9ec4620be..c6c6a78759 100644 --- a/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java +++ b/hadoop-cblock/server/src/main/java/org/apache/hadoop/cblock/storage/StorageManager.java @@ -25,10 +25,10 @@ import org.apache.hadoop.cblock.meta.VolumeDescriptor; import org.apache.hadoop.cblock.meta.VolumeInfo; import org.apache.hadoop.cblock.proto.MountVolumeResponse; import org.apache.hadoop.cblock.util.KeyUtil; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -187,8 +187,8 @@ public class StorageManager { ContainerDescriptor container = null; try { Pipeline pipeline = storageClient.createContainer( - HdslProtos.ReplicationType.STAND_ALONE, - HdslProtos.ReplicationFactor.ONE, + HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, KeyUtil.getContainerName(volume.getUserName(), volume.getVolumeName(), containerIdx), cblockId); diff --git a/hadoop-cblock/server/src/main/proto/CBlockClientServerProtocol.proto b/hadoop-cblock/server/src/main/proto/CBlockClientServerProtocol.proto index 160b254d05..45d0de91e6 100644 --- a/hadoop-cblock/server/src/main/proto/CBlockClientServerProtocol.proto +++ b/hadoop-cblock/server/src/main/proto/CBlockClientServerProtocol.proto @@ -27,7 +27,7 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; package hadoop.cblock; -import "hdsl.proto"; +import "hdds.proto"; import "CBlockServiceProtocol.proto"; /** * This message is sent from CBlock client side to CBlock server to @@ -69,7 +69,7 @@ message ContainerIDProto { required string containerID = 1; required uint64 index = 2; // making pipeline optional to be compatible with exisiting tests - optional hadoop.hdsl.Pipeline pipeline = 3; + optional hadoop.hdds.Pipeline pipeline = 3; } diff --git 
a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java index e1eb36f2be..50c4ba8831 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestBufferManager.java @@ -23,13 +23,13 @@ import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java index d995ba63c7..fb58a4e7c3 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockReadWrite.java @@ -24,21 +24,21 @@ import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import 
org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java index 90fe802303..386c9b2682 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServer.java @@ -19,8 +19,8 @@ package org.apache.hadoop.cblock; import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.cblock.meta.VolumeInfo; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.cblock.util.MockStorageClient; import org.junit.After; import org.junit.Before; diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java index 1ea183b5e2..db13972e30 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestCBlockServerPersistence.java @@ -18,8 +18,8 @@ package org.apache.hadoop.cblock; import org.apache.hadoop.cblock.meta.VolumeDescriptor; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.cblock.util.MockStorageClient; import org.junit.Test; diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java index 6eb7ea6762..e1e2909ac1 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/TestLocalBlockCache.java @@ -25,16 +25,16 @@ import org.apache.hadoop.cblock.jscsiHelper.CBlockTargetMetrics; import org.apache.hadoop.cblock.jscsiHelper.ContainerCacheFlusher; import org.apache.hadoop.cblock.jscsiHelper.cache.LogicalBlock; import org.apache.hadoop.cblock.jscsiHelper.cache.impl.CBlockLocalCache; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import 
org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; import org.junit.AfterClass; diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java index 8d1a8654c7..0268ccc4f3 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/kubernetes/TestDynamicProvisioner.java @@ -29,7 +29,7 @@ import org.junit.Test; import java.nio.file.Files; import java.nio.file.Paths; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; /** * Test the resource generation of Dynamic Provisioner. diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java index 8cb57d6d1e..d7dabe300c 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/ContainerLookUpService.java @@ -19,7 +19,7 @@ package org.apache.hadoop.cblock.util; import org.apache.hadoop.cblock.meta.ContainerDescriptor; import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.io.IOException; import java.util.concurrent.ConcurrentHashMap; diff --git a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java index 59c8e018ff..9fa76a859e 100644 --- a/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java +++ b/hadoop-cblock/server/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java @@ -18,12 +18,12 @@ package org.apache.hadoop.cblock.util; import org.apache.hadoop.cblock.meta.ContainerDescriptor; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.io.IOException; import java.util.ArrayList; @@ -88,7 +88,7 @@ public class MockStorageClient implements ScmClient { ContainerInfo container = new ContainerInfo.Builder() .setContainerName(containerDescriptor.getContainerID()) .setPipeline(containerDescriptor.getPipeline()) - .setState(HdslProtos.LifeCycleState.ALLOCATED) + 
.setState(HddsProtos.LifeCycleState.ALLOCATED) .build(); containerList.add(container); return containerList; @@ -134,8 +134,8 @@ public class MockStorageClient implements ScmClient { } @Override - public Pipeline createContainer(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor replicationFactor, String containerId, + public Pipeline createContainer(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor replicationFactor, String containerId, String owner) throws IOException { int contId = currentContainerId.getAndIncrement(); ContainerLookUpService.addContainer(Long.toString(contId)); @@ -153,8 +153,8 @@ public class MockStorageClient implements ScmClient { * @throws IOException */ @Override - public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> - nodeStatuses, HdslProtos.QueryScope queryScope, String poolName) + public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> + nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) throws IOException { return null; } @@ -168,8 +168,8 @@ public class MockStorageClient implements ScmClient { * @throws IOException */ @Override - public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) + public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) throws IOException { return null; } diff --git a/hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java b/hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java index 224c908d59..c6c0e84978 100644 --- a/hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java +++ b/hadoop-cblock/tools/src/main/java/org/apache/hadoop/cblock/cli/CBlockCli.java @@ -32,7 +32,7 @@ import org.apache.hadoop.cblock.protocolPB.CBlockServiceProtocolPB; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; diff --git a/hadoop-cblock/tools/src/test/org/apache/hadoop/cblock/TestCBlockCLI.java b/hadoop-cblock/tools/src/test/org/apache/hadoop/cblock/TestCBlockCLI.java index f8b05ed684..a3f53aa605 100644 --- a/hadoop-cblock/tools/src/test/org/apache/hadoop/cblock/TestCBlockCLI.java +++ b/hadoop-cblock/tools/src/test/org/apache/hadoop/cblock/TestCBlockCLI.java @@ -21,7 +21,7 @@ import org.apache.hadoop.cblock.cli.CBlockCli; import org.apache.hadoop.cblock.meta.VolumeDescriptor; import org.apache.hadoop.cblock.util.MockStorageClient; import org.apache.hadoop.conf.OzoneConfiguration; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.AfterClass; diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh index 59ee7b5ada..960bc63542 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh @@ -596,8 +596,8 @@ function hadoop_bootstrap YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"} MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"} 
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"} - HDSL_DIR=${HDSL_DIR:-"share/hadoop/hdsl"} - HDSL_LIB_JARS_DIR=${HDSL_LIB_JARS_DIR:-"share/hadoop/hdsl/lib"} + HDDS_DIR=${HDDS_DIR:-"share/hadoop/hdds"} + HDDS_LIB_JARS_DIR=${HDDS_LIB_JARS_DIR:-"share/hadoop/hdds/lib"} OZONE_DIR=${OZONE_DIR:-"share/hadoop/ozone"} OZONE_LIB_JARS_DIR=${OZONE_LIB_JARS_DIR:-"share/hadoop/ozone/lib"} CBLOCK_DIR=${CBLOCK_DIR:-"share/hadoop/cblock"} diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 98a9edf709..300355064d 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -219,7 +219,7 @@ </profile> <profile> - <id>hdsl</id> + <id>hdds</id> <activation> <activeByDefault>false</activeByDefault> </activation> @@ -231,11 +231,11 @@ </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-tools</artifactId> + <artifactId>hadoop-hdds-tools</artifactId> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> @@ -243,7 +243,7 @@ </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> @@ -251,7 +251,7 @@ </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-tools</artifactId> + <artifactId>hadoop-hdds-tools</artifactId> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> diff --git a/hadoop-dist/src/main/compose/cblock/docker-config b/hadoop-dist/src/main/compose/cblock/docker-config index da0c2ace80..4690de0fdc 100644 --- a/hadoop-dist/src/main/compose/cblock/docker-config +++ b/hadoop-dist/src/main/compose/cblock/docker-config @@ -27,7 +27,7 @@ OZONE-SITE.XML_ozone.scm.client.address=scm OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HdslDatanodeService +HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config index d297b19179..8e5efa961f 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-config +++ b/hadoop-dist/src/main/compose/ozone/docker-config @@ -27,7 +27,7 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HdslDatanodeService +HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout diff --git a/hadoop-hdsl/client/pom.xml b/hadoop-hdds/client/pom.xml index 
1f1eaf0e89..95ff09abb0 100644 --- a/hadoop-hdsl/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -19,24 +19,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl</artifactId> + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> </parent> - <artifactId>hadoop-hdsl-client</artifactId> + <artifactId>hadoop-hdds-client</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop HDSL Client libraries</description> - <name>Apache Hadoop HDSL Client</name> + <description>Apache Hadoop Distributed Data Store Client libraries</description> + <name>Apache HDDS Client</name> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> <dependencies> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java index 06b1e998a2..5c702c63b6 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClient.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -28,19 +28,19 @@ import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.OzoneClientUtils; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; -import java.util.List; import java.util.concurrent.Semaphore; /** @@ -69,7 +69,7 @@ public class XceiverClient extends XceiverClientSpi { this.pipeline = pipeline; this.config = config; this.semaphore = - new Semaphore(OzoneClientUtils.getMaxOutstandingRequests(config)); + new Semaphore(HddsClientUtils.getMaxOutstandingRequests(config)); } @Override @@ -186,7 +186,7 @@ public class XceiverClient extends XceiverClientSpi { * @return - Stand Alone as the type. 
*/ @Override - public HdslProtos.ReplicationType getPipelineType() { - return HdslProtos.ReplicationType.STAND_ALONE; + public HddsProtos.ReplicationType getPipelineType() { + return HddsProtos.ReplicationType.STAND_ALONE; } } diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java index 4b2d6c44f7..e2b55ac7e8 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientHandler.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientHandler.java @@ -15,19 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.base.Preconditions; - import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandResponseProto; - -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java index c70a6862b0..e10a9f6396 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientInitializer.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientInitializer.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelPipeline; @@ -24,8 +24,8 @@ import io.netty.handler.codec.protobuf.ProtobufDecoder; import io.netty.handler.codec.protobuf.ProtobufEncoder; import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder; import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import java.util.concurrent.Semaphore; diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 3f62a3a1ed..75851042c2 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -16,33 +16,32 @@ * limitations under the License. 
*/ -package org.apache.hadoop.scm; - -import java.io.Closeable; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.Callable; +package org.apache.hadoop.hdds.scm; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; - import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import static org.apache.hadoop.scm.ScmConfigKeys +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .SCM_CONTAINER_CLIENT_MAX_SIZE_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .SCM_CONTAINER_CLIENT_STALE_THRESHOLD_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys - .SCM_CONTAINER_CLIENT_MAX_SIZE_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys - .SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos .ReplicationType.RATIS; /** @@ -186,24 +185,24 @@ public class XceiverClientManager implements Closeable { * Returns hard coded 3 as replication factor. * @return 3 */ - public HdslProtos.ReplicationFactor getFactor() { + public HddsProtos.ReplicationFactor getFactor() { if(isUseRatis()) { - return HdslProtos.ReplicationFactor.THREE; + return HddsProtos.ReplicationFactor.THREE; } - return HdslProtos.ReplicationFactor.ONE; + return HddsProtos.ReplicationFactor.ONE; } /** * Returns the default replication type. * @return Ratis or Standalone */ - public HdslProtos.ReplicationType getType() { + public HddsProtos.ReplicationType getType() { // TODO : Fix me and make Ratis default before release. // TODO: Remove this as replication factor and type are pipeline properties if(isUseRatis()) { - return HdslProtos.ReplicationType.RATIS; + return HddsProtos.ReplicationType.RATIS; } - return HdslProtos.ReplicationType.STAND_ALONE; + return HddsProtos.ReplicationType.STAND_ALONE; } /** diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java index bcece23717..a61eba142a 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 084e3e55a1..d010c6913f 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -16,17 +16,19 @@ * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.client.OzoneClientUtils; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.RaftClientReply; @@ -60,7 +62,7 @@ public final class XceiverClientRatis extends XceiverClientSpi { ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final int maxOutstandingRequests = - OzoneClientUtils.getMaxOutstandingRequests(ozoneConf); + HddsClientUtils.getMaxOutstandingRequests(ozoneConf); return new XceiverClientRatis(pipeline, SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests); } @@ -98,8 +100,8 @@ public final class XceiverClientRatis extends XceiverClientSpi { * @return - Ratis */ @Override - public HdslProtos.ReplicationType getPipelineType() { - return HdslProtos.ReplicationType.RATIS; + public HddsProtos.ReplicationType getPipelineType() { + return HddsProtos.ReplicationType.RATIS; } private void reinitialize(List<DatanodeDetails> datanodes, RaftGroup group) diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java index 08ddfd6984..8f30a7fad1 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java @@ -15,20 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.scm.client; +package org.apache.hadoop.hdds.scm.client; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ReadContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,8 +39,10 @@ import java.util.EnumSet; import java.util.List; import java.util.UUID; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.ALLOCATED; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState + .ALLOCATED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState + .OPEN; /** * This class provides the client-facing APIs of container operations. @@ -189,8 +193,8 @@ public class ContainerOperationClient implements ScmClient { * @inheritDoc */ @Override - public Pipeline createContainer(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, + public Pipeline createContainer(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, String containerId, String owner) throws IOException { XceiverClientSpi client = null; try { @@ -229,8 +233,8 @@ public class ContainerOperationClient implements ScmClient { * @throws IOException */ @Override - public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> - nodeStatuses, HdslProtos.QueryScope queryScope, String poolName) + public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> + nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) throws IOException { return storageContainerLocationClient.queryNode(nodeStatuses, queryScope, poolName); @@ -240,8 +244,8 @@ public class ContainerOperationClient implements ScmClient { * Creates a specified replication pipeline. 
*/ @Override - public Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) + public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) throws IOException { return storageContainerLocationClient.createReplicationPipeline(type, factor, nodePool); diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java index c77f9656c4..bc5f8d65b4 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java @@ -16,30 +16,28 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.client; - -import java.text.ParseException; -import java.time.Instant; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.util.concurrent.TimeUnit; +package org.apache.hadoop.hdds.scm.client; +import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.ScmConfigKeys; - -import com.google.common.base.Preconditions; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.text.ParseException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.concurrent.TimeUnit; + /** * Utility methods for Ozone and Container Clients. * @@ -49,14 +47,14 @@ import org.slf4j.LoggerFactory; */ @InterfaceAudience.Public @InterfaceStability.Unstable -public final class OzoneClientUtils { +public final class HddsClientUtils { private static final Logger LOG = LoggerFactory.getLogger( - OzoneClientUtils.class); + HddsClientUtils.class); private static final int NO_PORT = -1; - private OzoneClientUtils() { + private HddsClientUtils() { } /** @@ -69,55 +67,28 @@ public final class OzoneClientUtils { return format.withZone(ZoneId.of(OzoneConsts.OZONE_TIME_ZONE)); }); + /** - * Returns the cache value to be used for list calls. - * @param conf Configuration object - * @return list cache size + * Convert time in millisecond to a human readable format required in ozone. + * @return a human readable string for the input time */ - public static int getListCacheSize(Configuration conf) { - return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE, - OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT); + public static String formatDateTime(long millis) { + ZonedDateTime dateTime = ZonedDateTime.ofInstant( + Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone()); + return DATE_FORMAT.get().format(dateTime); } /** - * @return a default instance of {@link CloseableHttpClient}. + * Convert time in ozone date format to millisecond. 
+ * @return time in milliseconds */ - public static CloseableHttpClient newHttpClient() { - return OzoneClientUtils.newHttpClient(new OzoneConfiguration()); + public static long formatDateTime(String date) throws ParseException { + Preconditions.checkNotNull(date, "Date string should not be null."); + return ZonedDateTime.parse(date, DATE_FORMAT.get()) + .toInstant().getEpochSecond(); } - /** - * Returns a {@link CloseableHttpClient} configured by given configuration. - * If conf is null, returns a default instance. - * - * @param conf configuration - * @return a {@link CloseableHttpClient} instance. - */ - public static CloseableHttpClient newHttpClient(Configuration conf) { - long socketTimeout = OzoneConfigKeys - .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT; - long connectionTimeout = OzoneConfigKeys - .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT; - if (conf != null) { - socketTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - connectionTimeout = conf.getTimeDuration( - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT, - OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - } - CloseableHttpClient client = HttpClients.custom() - .setDefaultRequestConfig( - RequestConfig.custom() - .setSocketTimeout(Math.toIntExact(socketTimeout)) - .setConnectTimeout(Math.toIntExact(connectionTimeout)) - .build()) - .build(); - return client; - } /** * verifies that bucket name / volume name is a valid DNS name. @@ -199,23 +170,53 @@ public final class OzoneClientUtils { } /** - * Convert time in millisecond to a human readable format required in ozone. - * @return a human readable string for the input time + * Returns the cache value to be used for list calls. + * @param conf Configuration object + * @return list cache size */ - public static String formatDateTime(long millis) { - ZonedDateTime dateTime = ZonedDateTime.ofInstant( - Instant.ofEpochSecond(millis), DATE_FORMAT.get().getZone()); - return DATE_FORMAT.get().format(dateTime); + public static int getListCacheSize(Configuration conf) { + return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE, + OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT); } /** - * Convert time in ozone date format to millisecond. - * @return time in milliseconds + * @return a default instance of {@link CloseableHttpClient}. */ - public static long formatDateTime(String date) throws ParseException { - Preconditions.checkNotNull(date, "Date string should not be null."); - return ZonedDateTime.parse(date, DATE_FORMAT.get()) - .toInstant().getEpochSecond(); + public static CloseableHttpClient newHttpClient() { + return HddsClientUtils.newHttpClient(new Configuration()); + } + + /** + * Returns a {@link CloseableHttpClient} configured by given configuration. + * If conf is null, returns a default instance. + * + * @param conf configuration + * @return a {@link CloseableHttpClient} instance. 
+ */ + public static CloseableHttpClient newHttpClient(Configuration conf) { + long socketTimeout = OzoneConfigKeys + .OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT; + long connectionTimeout = OzoneConfigKeys + .OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT; + if (conf != null) { + socketTimeout = conf.getTimeDuration( + OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT, + OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + connectionTimeout = conf.getTimeDuration( + OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT, + OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + } + + CloseableHttpClient client = HttpClients.custom() + .setDefaultRequestConfig( + RequestConfig.custom() + .setSocketTimeout(Math.toIntExact(socketTimeout)) + .setConnectTimeout(Math.toIntExact(connectionTimeout)) + .build()) + .build(); + return client; } /** diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/client/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java index 9febb0ac14..73ad78cd78 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/client/package-info.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.client; +package org.apache.hadoop.hdds.scm.client; /** * Client facing classes for the container operations. diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java index e23763d777..9390bc1020 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/package-info.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; /** * Classes for different type of container service client. diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java index 8c54d654c3..9b8eaa9661 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/storage/ChunkInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java @@ -16,7 +16,15 @@ * limitations under the License. 
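A small, hypothetical usage sketch of the relocated utility class, assuming only the methods kept in the hunks above (formatDateTime, getListCacheSize, newHttpClient); the wrapper class is illustrative:

import java.text.ParseException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.http.impl.client.CloseableHttpClient;

public class HddsClientUtilsSketch {
  public static void main(String[] args) throws ParseException, java.io.IOException {
    // The date helpers keep their signatures; only the class name changes
    // from OzoneClientUtils to HddsClientUtils.
    String formatted = HddsClientUtils.formatDateTime(System.currentTimeMillis());
    long parsed = HddsClientUtils.formatDateTime(formatted);

    // newHttpClient now takes a plain Configuration instead of an
    // OzoneConfiguration.
    Configuration conf = new Configuration();
    try (CloseableHttpClient http = HddsClientUtils.newHttpClient(conf)) {
      System.out.println("list cache size = "
          + HddsClientUtils.getListCacheSize(conf) + ", parsed = " + parsed);
    }
  }
}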
*/ -package org.apache.hadoop.scm.storage; +package org.apache.hadoop.hdds.scm.storage; + +import com.google.protobuf.ByteString; +import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ReadChunkResponseProto; import java.io.EOFException; import java.io.IOException; @@ -25,14 +33,6 @@ import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; -import com.google.protobuf.ByteString; - -import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadChunkResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.XceiverClientManager; - /** * An {@link InputStream} used by the REST service in combination with the * SCMClient to read the value of a key from a sequence diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java index 52a981f66a..b65df9f89b 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/storage/ChunkOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java @@ -16,24 +16,24 @@ * limitations under the License. */ -package org.apache.hadoop.scm.storage; +package org.apache.hadoop.hdds.scm.storage; -import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.putKey; -import static org.apache.hadoop.scm.storage.ContainerProtocolCalls.writeChunk; +import com.google.protobuf.ByteString; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.UUID; -import com.google.protobuf.ByteString; - -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; +import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putKey; +import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls + .writeChunk; /** * An {@link OutputStream} used by the REST service in combination with the diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/storage/package-info.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java index b367461922..6e7ce94878 100644 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/scm/storage/package-info.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.scm.storage; +package org.apache.hadoop.hdds.scm.storage; /** * Low level IO streams to upload/download chunks from container service. diff --git a/hadoop-hdsl/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml index d93c4a1ebf..3571a8929e 100644 --- a/hadoop-hdsl/common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml @@ -16,6 +16,6 @@ --> <FindBugsFilter> <Match> - <Package name="org.apache.hadoop.hdsl.protocol.proto"/> + <Package name="org.apache.hadoop.hdds.protocol.proto"/> </Match> </FindBugsFilter> diff --git a/hadoop-hdsl/common/pom.xml b/hadoop-hdds/common/pom.xml index c2323af3a0..6b2a156a15 100644 --- a/hadoop-hdsl/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -19,22 +19,21 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl</artifactId> + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> </parent> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop HDSL Common utilities</description> - <name>Apache Hadoop HDSL Common</name> + <description>Apache Hadoop Distributed Data Store Common</description> + <name>Apache HDDS Common</name> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> <dependencies> - <dependency> <groupId>org.fusesource.leveldbjni</groupId> <artifactId>leveldbjni-all</artifactId> @@ -109,7 +108,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <includes> <include>StorageContainerLocationProtocol.proto</include> <include>DatanodeContainerProtocol.proto</include> - <include>hdsl.proto</include> + <include>hdds.proto</include> <include>ScmBlockLocationProtocol.proto</include> </includes> </source> diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java new file mode 100644 index 0000000000..665618cf24 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -0,0 +1,6 @@ +package org.apache.hadoop.hdds; + +public class HddsConfigKeys { + private HddsConfigKeys() { + } +} diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/HdslUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 6446618f5d..f00f503794 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/HdslUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -16,35 +16,34 @@ * limitations under the License. 
*/ -package org.apache.hadoop.hdsl; +package org.apache.hadoop.hdds; -import java.net.InetSocketAddress; - -import java.nio.file.Paths; -import java.util.Collection; -import java.util.HashSet; +import com.google.common.base.Optional; +import com.google.common.base.Strings; +import com.google.common.net.HostAndPort; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetSocketAddress; +import java.nio.file.Paths; +import java.util.Collection; +import java.util.HashSet; -import com.google.common.base.Optional; -import com.google.common.base.Strings; -import com.google.common.net.HostAndPort; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** - * HDSL specific stateless utility functions. + * HDDS specific stateless utility functions. */ -public class HdslUtils { +public class HddsUtils { - private static final Logger LOG = LoggerFactory.getLogger(HdslUtils.class); + private static final Logger LOG = LoggerFactory.getLogger(HddsUtils.class); /** * The service ID of the solitary Ozone SCM service. @@ -55,7 +54,7 @@ public class HdslUtils { private static final int NO_PORT = -1; - private HdslUtils() { + private HddsUtils() { } /** @@ -233,7 +232,7 @@ public class HdslUtils { return addresses; } - public static boolean isHdslEnabled(Configuration conf) { + public static boolean isHddsEnabled(Configuration conf) { String securityEnabled = conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "simple"); diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java index 032dd60d99..59708a956b 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/OzoneQuota.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.client; +package org.apache.hadoop.hdds.client; import org.apache.hadoop.ozone.OzoneConsts; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java index 971cfec740..0215964ab8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/ReplicationFactor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.client; +package org.apache.hadoop.hdds.client; /** * The replication factor to be used while writing key into ozone. 
diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java index 537c336e50..259a1a2931 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/ReplicationType.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.client; +package org.apache.hadoop.hdds.client; /** * The replication type to be used while writing key into ozone. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java new file mode 100644 index 0000000000..e81f134b25 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.client; + +/** + * Base property types for HDDS containers and replications. + */
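For the utility rename above, a hypothetical check mirroring the new method name, assuming only the isHddsEnabled(Configuration) signature shown in the hunk; the wrapping class is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsUtils;

public class HddsEnabledCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Formerly HdslUtils.isHdslEnabled(conf); both the class and the
    // method follow the HDSL -> HDDS rename.
    boolean enabled = HddsUtils.isHddsEnabled(conf);
    System.out.println("HDDS enabled: " + enabled);
  }
}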
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index a185156fe2..f07718c0c8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -16,11 +16,11 @@ * limitations under the License. */ -package org.apache.hadoop.hdsl.conf; +package org.apache.hadoop.hdds.conf; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; -import java.net.URL; -import java.util.ArrayList; -import java.util.List; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; @@ -28,8 +28,9 @@ import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; /** * Configuration for ozone. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/conf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java index 2aa5c2501c..948057ebba 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/conf/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/package-info.java @@ -15,4 +15,4 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdsl.conf;
\ No newline at end of file +package org.apache.hadoop.hdds.conf;
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java index d24d0fb336..f8894e6a7e 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/package-info.java @@ -16,8 +16,8 @@ * limitations under the License. */ -package org.apache.hadoop.hdsl; +package org.apache.hadoop.hdds; /** - * Generic HDSL specific configurator and helper classes. + * Generic HDDS specific configurator and helper classes. */
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 7049c30aee..1463591861 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -16,12 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.hdsl.protocol; +package org.apache.hadoop.hdds.protocol; import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.util.UUID; @@ -222,7 +222,7 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> { * @return DatanodeDetails */ public static DatanodeDetails getFromProtoBuf( - HdslProtos.DatanodeDetailsProto datanodeDetailsProto) { + HddsProtos.DatanodeDetailsProto datanodeDetailsProto) { DatanodeDetails.Builder builder = newBuilder(); builder.setUuid(datanodeDetailsProto.getUuid()); if (datanodeDetailsProto.hasIpAddress()) { @@ -251,11 +251,11 @@ public final class DatanodeDetails implements Comparable<DatanodeDetails> { /** * Returns a DatanodeDetails protobuf message from a datanode ID. - * @return Hdsl.DatanodeDetailsProto + * @return HddsProtos.DatanodeDetailsProto */ - public HdslProtos.DatanodeDetailsProto getProtoBufMessage() { - HdslProtos.DatanodeDetailsProto.Builder builder = - HdslProtos.DatanodeDetailsProto.newBuilder() + public HddsProtos.DatanodeDetailsProto getProtoBufMessage() { + HddsProtos.DatanodeDetailsProto.Builder builder = + HddsProtos.DatanodeDetailsProto.newBuilder() .setUuid(getUuidString()); if (ipAddress != null) { builder.setIpAddress(ipAddress); diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/protocol/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java index 7fd2543b7e..7dae0fce02 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/hdsl/protocol/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/package-info.java @@ -17,6 +17,6 @@ */ /** - * This package contains HDSL protocol related classes. + * This package contains HDDS protocol related classes. */ -package org.apache.hadoop.hdsl.protocol;
\ No newline at end of file +package org.apache.hadoop.hdds.protocol;
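A hypothetical round-trip sketch for the renamed protobuf message, using only the two conversion methods shown in the DatanodeDetails hunk; the helper class and method are illustrative:

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public class DatanodeDetailsRoundTrip {
  // Serialize and de-serialize a datanode descriptor with the renamed
  // HddsProtos message; the 'details' argument is assumed to come from
  // elsewhere (e.g. a registration event).
  static DatanodeDetails roundTrip(DatanodeDetails details) {
    HddsProtos.DatanodeDetailsProto proto = details.getProtoBufMessage();
    return DatanodeDetails.getFromProtoBuf(proto);
  }
}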
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 9d153597f4..7f40ab27fe 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -121,18 +121,18 @@ public final class ScmConfigKeys { public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876; public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877; - public static final String HDSL_REST_HTTP_ADDRESS_KEY = - "hdsl.rest.http-address"; - public static final String HDSL_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880"; - public static final String HDSL_REST_CSRF_ENABLED_KEY = - "hdsl.rest.rest-csrf.enabled"; - public static final boolean HDSL_REST_CSRF_ENABLED_DEFAULT = false; - public static final String HDSL_REST_NETTY_HIGH_WATERMARK = - "hdsl.rest.netty.high.watermark"; - public static final int HDSL_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536; - public static final int HDSL_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768; - public static final String HDSL_REST_NETTY_LOW_WATERMARK = - "hdsl.rest.netty.low.watermark"; + public static final String HDDS_REST_HTTP_ADDRESS_KEY = + "hdds.rest.http-address"; + public static final String HDDS_REST_HTTP_ADDRESS_DEFAULT = "0.0.0.0:9880"; + public static final String HDDS_REST_CSRF_ENABLED_KEY = + "hdds.rest.rest-csrf.enabled"; + public static final boolean HDDS_REST_CSRF_ENABLED_DEFAULT = false; + public static final String HDDS_REST_NETTY_HIGH_WATERMARK = + "hdds.rest.netty.high.watermark"; + public static final int HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT = 65536; + public static final int HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT = 32768; + public static final String HDDS_REST_NETTY_LOW_WATERMARK = + "hdds.rest.netty.low.watermark"; public static final String OZONE_SCM_HANDLER_COUNT_KEY = "ozone.scm.handler.count.key"; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java index e442fe25d8..6236febb7b 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/ScmInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; /** * ScmInfo wraps the result returned from SCM#getScmInfo which diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java index 49817d32b9..c96f79b2d5 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/XceiverClientSpi.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java @@ -16,16 +16,16 @@ * limitations under the License. 
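The configuration property names change along with the constants, so a deployment that set hdsl.rest.* values needs the matching hdds.rest.* names. A hypothetical read of the renamed keys, using only constants visible in the ScmConfigKeys hunk above; the example class is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public class RestConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // hdsl.rest.http-address -> hdds.rest.http-address, default 0.0.0.0:9880.
    String restAddress = conf.get(
        ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY,
        ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_DEFAULT);
    // hdsl.rest.rest-csrf.enabled -> hdds.rest.rest-csrf.enabled.
    boolean csrfEnabled = conf.getBoolean(
        ScmConfigKeys.HDDS_REST_CSRF_ENABLED_KEY,
        ScmConfigKeys.HDDS_REST_CSRF_ENABLED_DEFAULT);
    System.out.println(restAddress + " (csrf=" + csrfEnabled + ")");
  }
}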
*/ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.Closeable; import java.io.IOException; @@ -125,5 +125,5 @@ public abstract class XceiverClientSpi implements Closeable { * * @return - {Stand_Alone, Ratis or Chained} */ - public abstract HdslProtos.ReplicationType getPipelineType(); + public abstract HddsProtos.ReplicationType getPipelineType(); } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 79c60290a4..0d4a2990b6 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -15,13 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm.client; +package org.apache.hadoop.hdds.scm.client; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.EnumSet; @@ -111,8 +111,8 @@ public interface ScmClient { * @return Pipeline * @throws IOException - in case of error. */ - Pipeline createContainer(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor replicationFactor, String containerId, + Pipeline createContainer(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor replicationFactor, String containerId, String owner) throws IOException; /** @@ -123,8 +123,8 @@ public interface ScmClient { * @return A set of nodes that meet the requested criteria. * @throws IOException */ - HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses, - HdslProtos.QueryScope queryScope, String poolName) throws IOException; + HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses, + HddsProtos.QueryScope queryScope, String poolName) throws IOException; /** * Creates a specified replication pipeline. @@ -133,7 +133,7 @@ public interface ScmClient { * @param nodePool - Set of machines. 
* @throws IOException */ - Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) + Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) throws IOException; } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java index f9a2c090f2..e2f7033d7f 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.protocolPB; +package org.apache.hadoop.hdds.scm.client; /** * This package contains classes for the client of the storage container diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/ContainerStates/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java index a51d3b7352..9520c8c46f 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/ContainerStates/ContainerID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java @@ -16,7 +16,7 @@ * */ -package org.apache.hadoop.ozone.scm.container.ContainerStates; +package org.apache.hadoop.hdds.scm.container; import com.google.common.base.Preconditions; import org.apache.commons.math3.util.MathUtils; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java index f51336faf9..d253b15cd2 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/AllocatedBlock.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; /** * Allocated block wraps the result returned from SCM#allocateBlock which diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java index 8e6fdbbdd5..823a7fbc05 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java @@ -16,13 +16,13 @@ * limitations under the License. 
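A hypothetical caller of the renamed ScmClient API, assuming only the createContainer signature shown above; the helper class, the parameters, and the choice of RATIS replication are illustrative:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public class CreateContainerSketch {
  // Allocate a Ratis-replicated container through the renamed client API;
  // the client instance, factor, container name and owner are assumed to be
  // supplied by the caller.
  static Pipeline createRatisContainer(ScmClient client,
      HddsProtos.ReplicationFactor factor, String containerName, String owner)
      throws IOException {
    return client.createContainer(
        HddsProtos.ReplicationType.RATIS, factor, containerName, owner);
  }
}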
*/ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; import com.google.common.base.Preconditions; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.util.Time; import java.util.Comparator; @@ -32,7 +32,7 @@ import java.util.Comparator; */ public class ContainerInfo implements Comparator<ContainerInfo>, Comparable<ContainerInfo> { - private HdslProtos.LifeCycleState state; + private HddsProtos.LifeCycleState state; private Pipeline pipeline; // Bytes allocated by SCM for clients. private long allocatedBytes; @@ -48,7 +48,7 @@ public class ContainerInfo ContainerInfo( long containerID, final String containerName, - HdslProtos.LifeCycleState state, + HddsProtos.LifeCycleState state, Pipeline pipeline, long allocatedBytes, long usedBytes, @@ -73,7 +73,7 @@ public class ContainerInfo public ContainerInfo() { } - public static ContainerInfo fromProtobuf(HdslProtos.SCMContainerInfo info) { + public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) { ContainerInfo.Builder builder = new ContainerInfo.Builder(); builder.setPipeline(Pipeline.getFromProtoBuf(info.getPipeline())); builder.setAllocatedBytes(info.getAllocatedBytes()); @@ -95,11 +95,11 @@ public class ContainerInfo return containerName; } - public HdslProtos.LifeCycleState getState() { + public HddsProtos.LifeCycleState getState() { return state; } - public void setState(HdslProtos.LifeCycleState state) { + public void setState(HddsProtos.LifeCycleState state) { this.state = state; } @@ -156,9 +156,9 @@ public class ContainerInfo allocatedBytes += size; } - public HdslProtos.SCMContainerInfo getProtobuf() { - HdslProtos.SCMContainerInfo.Builder builder = - HdslProtos.SCMContainerInfo.newBuilder(); + public HddsProtos.SCMContainerInfo getProtobuf() { + HddsProtos.SCMContainerInfo.Builder builder = + HddsProtos.SCMContainerInfo.newBuilder(); builder.setPipeline(getPipeline().getProtobufMessage()); builder.setAllocatedBytes(getAllocatedBytes()); builder.setUsedBytes(getUsedBytes()); @@ -268,7 +268,7 @@ public class ContainerInfo * Builder class for ContainerInfo. */ public static class Builder { - private HdslProtos.LifeCycleState state; + private HddsProtos.LifeCycleState state; private Pipeline pipeline; private long allocated; private long used; @@ -284,7 +284,7 @@ public class ContainerInfo return this; } - public Builder setState(HdslProtos.LifeCycleState lifeCycleState) { + public Builder setState(HddsProtos.LifeCycleState lifeCycleState) { this.state = lifeCycleState; return this; } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java index dceaa6cd7b..fd97eae3b7 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/DeleteBlockResult.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java @@ -15,10 +15,9 @@ * the License. 
*/ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; - -import static org.apache.hadoop.hdsl.protocol.proto +import static org.apache.hadoop.hdds.protocol.proto .ScmBlockLocationProtocolProtos.DeleteScmBlockResult; /** diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java index 9f2d1f4ef9..32d0a2d85a 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; import com.fasterxml.jackson.annotation.JsonAutoDetect; import com.fasterxml.jackson.annotation.JsonFilter; @@ -29,8 +29,8 @@ import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter; import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.ArrayList; @@ -83,14 +83,14 @@ public class Pipeline { * @param pipeline - ProtoBuf definition for the pipeline. * @return Pipeline Object */ - public static Pipeline getFromProtoBuf(HdslProtos.Pipeline pipeline) { + public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) { Preconditions.checkNotNull(pipeline); PipelineChannel pipelineChannel = PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel()); return new Pipeline(pipeline.getContainerName(), pipelineChannel); } - public HdslProtos.ReplicationFactor getFactor() { + public HddsProtos.ReplicationFactor getFactor() { return pipelineChannel.getFactor(); } @@ -143,9 +143,9 @@ public class Pipeline { * @return Protobuf message */ @JsonIgnore - public HdslProtos.Pipeline getProtobufMessage() { - HdslProtos.Pipeline.Builder builder = - HdslProtos.Pipeline.newBuilder(); + public HddsProtos.Pipeline getProtobufMessage() { + HddsProtos.Pipeline.Builder builder = + HddsProtos.Pipeline.newBuilder(); builder.setContainerName(this.containerName); builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage()); return builder.build(); @@ -194,7 +194,7 @@ public class Pipeline { * * @return - LifeCycleStates. */ - public HdslProtos.LifeCycleState getLifeCycleState() { + public HddsProtos.LifeCycleState getLifeCycleState() { return pipelineChannel.getLifeCycleState(); } @@ -212,7 +212,7 @@ public class Pipeline { * * @return type - Standalone, Ratis, Chained. 
*/ - public HdslProtos.ReplicationType getType() { + public HddsProtos.ReplicationType getType() { return pipelineChannel.getType(); } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/PipelineChannel.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java index 1937968b66..ebd52e9984 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/PipelineChannel.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; import com.fasterxml.jackson.annotation.JsonIgnore; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import java.util.Map; import java.util.TreeMap; @@ -82,9 +82,9 @@ public class PipelineChannel { } @JsonIgnore - public HdslProtos.PipelineChannel getProtobufMessage() { - HdslProtos.PipelineChannel.Builder builder = - HdslProtos.PipelineChannel.newBuilder(); + public HddsProtos.PipelineChannel getProtobufMessage() { + HddsProtos.PipelineChannel.Builder builder = + HddsProtos.PipelineChannel.newBuilder(); for (DatanodeDetails datanode : datanodes.values()) { builder.addMembers(datanode.getProtoBufMessage()); } @@ -104,7 +104,7 @@ public class PipelineChannel { } public static PipelineChannel getFromProtoBuf( - HdslProtos.PipelineChannel transportProtos) { + HddsProtos.PipelineChannel transportProtos) { Preconditions.checkNotNull(transportProtos); PipelineChannel pipelineChannel = new PipelineChannel(transportProtos.getLeaderID(), @@ -113,7 +113,7 @@ public class PipelineChannel { transportProtos.getFactor(), transportProtos.getName()); - for (HdslProtos.DatanodeDetailsProto dataID : + for (HddsProtos.DatanodeDetailsProto dataID : transportProtos.getMembersList()) { pipelineChannel.addMember(DatanodeDetails.getFromProtoBuf(dataID)); } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/StorageContainerException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java index e285aec32c..35d8444b9d 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/StorageContainerException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/StorageContainerException.java @@ -15,9 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
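A hypothetical conversion sketch built only from the ContainerInfo and Pipeline methods visible in the hunks above (fromProtobuf, getProtobuf, getFromProtoBuf, getProtobufMessage); the wrapper class is illustrative:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public class ContainerInfoRoundTrip {
  static HddsProtos.SCMContainerInfo toProto(ContainerInfo info) {
    // The protobuf message type moves from HdslProtos to HddsProtos.
    return info.getProtobuf();
  }

  static ContainerInfo fromProto(HddsProtos.SCMContainerInfo proto) {
    ContainerInfo info = ContainerInfo.fromProtobuf(proto);
    // The life-cycle state enum also moves to HddsProtos.
    HddsProtos.LifeCycleState state = info.getState();
    System.out.println("container is in state " + state);
    return info;
  }

  static Pipeline pipelineFromProto(HddsProtos.Pipeline proto) {
    return Pipeline.getFromProtoBuf(proto);
  }
}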
*/ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import java.io.IOException; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java index 3fa966379b..ffe0d3d4d9 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/common/helpers/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm.container.common.helpers; +package org.apache.hadoop.hdds.scm.container.common.helpers; /** Contains protocol buffer helper classes and utilites used in impl. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java index ad24f9877a..3c544db3ab 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; /** * This package contains classes for the client of the storage container diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/LocatedContainer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java index 469dab5188..14ee3d2f80 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/LocatedContainer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.protocol; +package org.apache.hadoop.hdds.scm.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java index 0bb84acc05..f100fc702c 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/ScmBlockLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java @@ -15,19 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.scm.protocol; +package org.apache.hadoop.hdds.scm.protocol; + +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.ozone.common.BlockGroup; +import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import java.io.IOException; import java.util.List; import java.util.Set; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.scm.ScmInfo; - /** * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes * to read/write a block. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/ScmLocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java index 4e4b3d669e..6cbdee4238 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/ScmLocatedBlock.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java @@ -16,14 +16,14 @@ * limitations under the License. */ -package org.apache.hadoop.scm.protocol; - -import java.util.List; -import java.util.stream.Collectors; +package org.apache.hadoop.hdds.scm.protocol; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import java.util.List; +import java.util.stream.Collectors; + /** * Holds the nodes that currently host the block for a block key. */ diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 01fc07552b..a60fbb2f22 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -15,18 +15,19 @@ * the License. */ -package org.apache.hadoop.scm.protocol; +package org.apache.hadoop.hdds.scm.protocol; + +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; import java.io.IOException; import java.util.EnumSet; import java.util.List; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; - /** * ContainerLocationProtocol is used by an HDFS node to find the set of nodes * that currently host a container. @@ -37,8 +38,8 @@ public interface StorageContainerLocationProtocol { * set of datanodes that should be used creating this container. 
* */ - Pipeline allocateContainer(HdslProtos.ReplicationType replicationType, - HdslProtos.ReplicationFactor factor, String containerName, String owner) + Pipeline allocateContainer(HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor factor, String containerName, String owner) throws IOException; /** @@ -85,8 +86,8 @@ public interface StorageContainerLocationProtocol { * @param nodeStatuses * @return List of Datanodes. */ - HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> nodeStatuses, - HdslProtos.QueryScope queryScope, String poolName) throws IOException; + HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> nodeStatuses, + HddsProtos.QueryScope queryScope, String poolName) throws IOException; /** * Notify from client when begin or finish creating objects like pipeline @@ -109,8 +110,8 @@ public interface StorageContainerLocationProtocol { * @param nodePool - optional machine list to build a pipeline. * @throws IOException */ - Pipeline createReplicationPipeline(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, HdslProtos.NodePool nodePool) + Pipeline createReplicationPipeline(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, HddsProtos.NodePool nodePool) throws IOException; /** diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java index d1f97755bc..b56a749453 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/package-info.java @@ -16,4 +16,4 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli;
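A hypothetical call against the renamed SCM container-location protocol, assuming only the allocateContainer signature shown above; the wrapper class and parameters are illustrative:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;

public class AllocateContainerSketch {
  // Ask SCM for a new container over the renamed protocol interface;
  // the proxy, replication settings, name and owner are assumed inputs.
  static Pipeline allocate(StorageContainerLocationProtocol scm,
      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
      String containerName, String owner) throws IOException {
    return scm.allocateContainer(type, factor, containerName, owner);
  }
}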
\ No newline at end of file +package org.apache.hadoop.hdds.scm.protocol; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 0de759ff23..0012f3e4a8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -14,31 +14,39 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.scm.protocolPB; +package org.apache.hadoop.hdds.scm.protocolPB; import com.google.common.base.Preconditions; import com.google.common.collect.Sets; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .AllocateScmBlockRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .AllocateScmBlockResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteScmKeyBlocksRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .GetScmBlockLocationsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .GetScmBlockLocationsResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .KeyBlocks; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .ScmLocatedBlockProto; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.GetScmBlockLocationsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.GetScmBlockLocationsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.ScmLocatedBlockProto; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.ScmInfo; -import 
org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import java.io.Closeable; import java.io.IOException; @@ -117,7 +125,7 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB */ @Override public AllocatedBlock allocateBlock(long size, - HdslProtos.ReplicationType type, HdslProtos.ReplicationFactor factor, + HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, String owner) throws IOException { Preconditions.checkArgument(size > 0, "block size must be greater than 0"); @@ -181,9 +189,9 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB */ @Override public ScmInfo getScmInfo() throws IOException { - HdslProtos.GetScmInfoRequestProto request = - HdslProtos.GetScmInfoRequestProto.getDefaultInstance(); - HdslProtos.GetScmInfoRespsonseProto resp; + HddsProtos.GetScmInfoRequestProto request = + HddsProtos.GetScmInfoRequestProto.getDefaultInstance(); + HddsProtos.GetScmInfoRespsonseProto resp; try { resp = rpcProxy.getScmInfo(NULL_RPC_CONTROLLER, request); } catch (ServiceException e) { diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/ScmBlockLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java index 019aeeb192..837c95b2aa 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/ScmBlockLocationProtocolPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolPB.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm.protocolPB; +package org.apache.hadoop.hdds.scm.protocolPB; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .ScmBlockLocationProtocolService; +import org.apache.hadoop.ipc.ProtocolInfo; /** * Protocol used from an HDFS node to StorageContainerManager. This extends the diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 348b266e5c..3638f63e65 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -14,33 +14,45 @@ * License for the specific language governing permissions and limitations under * the License. 
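A hypothetical block allocation against the renamed block-location protocol, assuming only the allocateBlock signature shown in the translator hunk above; the wrapper class and parameters are illustrative:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

public class AllocateBlockSketch {
  // Request a block from SCM through the renamed block-location protocol;
  // size, replication settings and owner are assumed inputs, and size must
  // be positive (the translator guards this with a Preconditions check).
  static AllocatedBlock allocate(ScmBlockLocationProtocol scm, long size,
      HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor,
      String owner) throws IOException {
    return scm.allocateBlock(size, type, factor, owner);
  }
}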
*/ -package org.apache.hadoop.scm.protocolPB; +package org.apache.hadoop.hdds.scm.protocolPB; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.GetContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.GetContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.NodeQueryRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.NodeQueryResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.PipelineRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.PipelineResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import 
org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import java.io.Closeable; import java.io.IOException; @@ -85,8 +97,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB * @throws IOException */ @Override - public Pipeline allocateContainer(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, String + public Pipeline allocateContainer(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, String containerName, String owner) throws IOException { Preconditions.checkNotNull(containerName, "Container Name cannot be Null"); @@ -151,7 +163,7 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB SCMListContainerResponseProto response = rpcProxy.listContainer(NULL_RPC_CONTROLLER, request); List<ContainerInfo> containerList = new ArrayList<>(); - for (HdslProtos.SCMContainerInfo containerInfoProto : response + for (HddsProtos.SCMContainerInfo containerInfoProto : response .getContainersList()) { containerList.add(ContainerInfo.fromProtobuf(containerInfoProto)); } @@ -191,8 +203,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB * @return List of Datanodes. */ @Override - public HdslProtos.NodePool queryNode(EnumSet<HdslProtos.NodeState> - nodeStatuses, HdslProtos.QueryScope queryScope, String poolName) + public HddsProtos.NodePool queryNode(EnumSet<HddsProtos.NodeState> + nodeStatuses, HddsProtos.QueryScope queryScope, String poolName) throws IOException { // TODO : We support only cluster wide query right now. So ignoring checking // queryScope and poolName @@ -248,8 +260,8 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB * @throws IOException */ @Override - public Pipeline createReplicationPipeline(HdslProtos.ReplicationType - replicationType, HdslProtos.ReplicationFactor factor, HdslProtos + public Pipeline createReplicationPipeline(HddsProtos.ReplicationType + replicationType, HddsProtos.ReplicationFactor factor, HddsProtos .NodePool nodePool) throws IOException { PipelineRequestProto request = PipelineRequestProto.newBuilder() .setNodePool(nodePool) @@ -277,10 +289,10 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB @Override public ScmInfo getScmInfo() throws IOException { - HdslProtos.GetScmInfoRequestProto request = - HdslProtos.GetScmInfoRequestProto.getDefaultInstance(); + HddsProtos.GetScmInfoRequestProto request = + HddsProtos.GetScmInfoRequestProto.getDefaultInstance(); try { - HdslProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo( + HddsProtos.GetScmInfoRespsonseProto resp = rpcProxy.getScmInfo( NULL_RPC_CONTROLLER, request); ScmInfo.Builder builder = new ScmInfo.Builder() .setClusterId(resp.getClusterId()) diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java index b8c2958afe..f234ad3129 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocolPB/StorageContainerLocationProtocolPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolPB.java @@ -15,11 +15,13 @@ * See the License for the specific language governing permissions and * 
limitations under the License. */ -package org.apache.hadoop.scm.protocolPB; +package org.apache.hadoop.hdds.scm.protocolPB; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos + .StorageContainerLocationProtocolService; import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.StorageContainerLocationProtocolService; /** * Protocol used from an HDFS node to StorageContainerManager. This extends the diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/client/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java index e85e542eea..652ae60973 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/client/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.client; +package org.apache.hadoop.hdds.scm.protocolPB; /** * This package contains classes for the client of the storage container diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index 174f1c17e9..1559816bc4 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -16,44 +16,43 @@ * limitations under the License. */ -package org.apache.hadoop.scm.storage; +package org.apache.hadoop.hdds.scm.storage; import com.google.protobuf.ByteString; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .GetKeyRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .GetKeyResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .GetSmallFileRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .GetSmallFileResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .PutKeyRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto; +import 
org.apache.hadoop.hdds.protocol.proto.ContainerProtos .PutSmallFileRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ReadChunkRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ReadChunkResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Type; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .ReadContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ReadContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ReadContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .WriteChunkRequestProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import java.io.IOException; -import org.apache.hadoop.scm.XceiverClientSpi; /** * Implementation of all container protocol calls performed by Container diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/storage/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java index aa89af0695..8e981586bd 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/storage/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/package-info.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.scm.storage; +package org.apache.hadoop.hdds.scm.storage; /** * This package contains StorageContainerManager classes. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index ff0ac4e2d4..ff0ac4e2d4 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index bad3a84c93..ef96f37939 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -20,10 +20,9 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; - -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; /** * This class contains constants for configuration keys used in Ozone. 
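The OzoneConfigKeys hunk above is typical of the whole patch: only the defining packages move (org.apache.hadoop.ozone.client and org.apache.hadoop.scm become org.apache.hadoop.hdds.client and org.apache.hadoop.hdds.scm), while the class and constant names stay the same. A minimal, hypothetical downstream snippet, not part of this commit, sketching the mechanical import substitution a consumer of the old packages would make; the class name and the enum constants used (RATIS, THREE) are assumptions for illustration only.

import org.apache.hadoop.hdds.client.ReplicationFactor; // was org.apache.hadoop.ozone.client.ReplicationFactor
import org.apache.hadoop.hdds.client.ReplicationType;   // was org.apache.hadoop.ozone.client.ReplicationType
import org.apache.hadoop.hdds.scm.ScmConfigKeys;        // was org.apache.hadoop.scm.ScmConfigKeys

public class HddsRenameMigrationExample {
  // Only the packages moved; the identifiers are unchanged, so downstream code
  // recompiles after a mechanical rewrite of its import statements.
  public static String defaultPipelineDescription() {
    return ReplicationType.RATIS + "/" + ReplicationFactor.THREE;
  }
}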
diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 2f9e469d40..2f9e469d40 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java index 9fcc61334e..38ce6ccb0a 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.common; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.KeyBlocks; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .KeyBlocks; import java.util.List; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java index da56385844..ec54ac5407 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ozone.common; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; -import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult; +import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteScmBlockResult; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteScmBlockResult.Result; import java.util.ArrayList; import java.util.List; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java index c3f9234fa9..518b519478 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/InconsistentStorageStateException.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.ozone.common; -import java.io.File; -import java.io.IOException; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import java.io.File; +import java.io.IOException; + /** * The exception is thrown when file system state is inconsistent * and is not recoverable. 
diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java index 9df2ffaafb..fb30d921b8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.ozone.common; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.File; import java.io.IOException; import java.nio.file.DirectoryStream; @@ -24,14 +31,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.Properties; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; -import org.apache.hadoop.util.Time; - /** * Storage information file. This Class defines the methods to check * the consistency of the storage dir and the version file. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java index a79980ab03..0e98a4c8a9 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java @@ -17,19 +17,18 @@ */ package org.apache.hadoop.ozone.common; -import java.io.IOException; +import com.google.common.base.Preconditions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; + +import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.File; +import java.io.IOException; import java.io.RandomAccessFile; - import java.util.Properties; import java.util.UUID; -import com.google.common.base.Preconditions; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; - /** * Common class for storage information. 
This class defines the common * properties and functions to set them , write them into the version file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java index 6517e5897e..6517e5897e 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java index 9aeff24838..9aeff24838 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/InvalidStateTransitionException.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java index bf8cbd596e..bf8cbd596e 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/StateMachine.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java index 045409e3ed..045409e3ed 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/statemachine/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java index 4aa36c38ad..aa1fe74b2a 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.Map; @@ -111,8 +111,8 @@ public class ChunkInfo { } for (Map.Entry<String, String> entry : metadata.entrySet()) { - HdslProtos.KeyValue.Builder keyValBuilder = - HdslProtos.KeyValue.newBuilder(); + HddsProtos.KeyValue.Builder keyValBuilder = + HddsProtos.KeyValue.newBuilder(); builder.addMetadata(keyValBuilder.setKey(entry.getKey()) .setValue(entry.getValue()).build()); } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java index eb021525b8..be546c75c3 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java +++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.ozone.container.common.helpers; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.Collections; @@ -82,8 +82,8 @@ public class KeyData { builder.setName(this.getKeyName()); builder.addAllChunks(this.chunks); for (Map.Entry<String, String> entry : metadata.entrySet()) { - HdslProtos.KeyValue.Builder keyValBuilder = - HdslProtos.KeyValue.newBuilder(); + HddsProtos.KeyValue.Builder keyValBuilder = + HddsProtos.KeyValue.newBuilder(); builder.addMetadata(keyValBuilder.setKey(entry.getKey()) .setValue(entry.getValue()).build()); } diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java index fa5df113d8..fa5df113d8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java index dfa93156da..dfa93156da 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java index a39ea22df1..a39ea22df1 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseAlreadyExistException.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java index 1b7391bf5d..1b7391bf5d 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java index 418f4127df..418f4127df 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseException.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java index 440a023bef..440a023bef 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseExpiredException.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java index b8390ddd3d..b8390ddd3d 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManager.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java index ced31de439..ced31de439 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseManagerNotRunningException.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java index c292d33232..c292d33232 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseNotFoundException.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java index 48ee2e1c6a..48ee2e1c6a 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java index db399db25a..db399db25a 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java index c211bd5b9b..fa793419bf 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -21,30 +21,31 @@ import com.google.common.collect.Sets; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .AllocateScmBlockRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .AllocateScmBlockResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteKeyBlocksResultProto; +import 
org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteScmKeyBlocksRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .GetScmBlockLocationsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .GetScmBlockLocationsResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .ScmLocatedBlockProto; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.AllocateScmBlockRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.GetScmBlockLocationsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.GetScmBlockLocationsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.ScmLocatedBlockProto; import java.io.IOException; import java.util.List; @@ -152,8 +153,8 @@ public final class ScmBlockLocationProtocolServerSideTranslatorPB } @Override - public HdslProtos.GetScmInfoRespsonseProto getScmInfo( - RpcController controller, HdslProtos.GetScmInfoRequestProto req) + public HddsProtos.GetScmInfoRespsonseProto getScmInfo( + RpcController controller, HddsProtos.GetScmInfoRequestProto req) throws ServiceException { ScmInfo scmInfo; try { @@ -161,7 +162,7 @@ public final class ScmBlockLocationProtocolServerSideTranslatorPB } catch (IOException ex) { throw new ServiceException(ex); } - return HdslProtos.GetScmInfoRespsonseProto.newBuilder() + return HddsProtos.GetScmInfoRespsonseProto.newBuilder() .setClusterId(scmInfo.getClusterId()) .setScmId(scmInfo.getScmId()) .build(); diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java index be19c68eb5..4974268bcb 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -18,35 +18,45 @@ */ package org.apache.hadoop.ozone.protocolPB; -import java.io.IOException; -import java.util.EnumSet; -import java.util.List; - import com.google.protobuf.RpcController; import 
com.google.protobuf.ServiceException; - import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerLocationProtocolProtos; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; - -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.GetContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.GetContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ObjectStageChangeResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.PipelineRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.PipelineResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMDeleteContainerResponseProto; +import 
org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMListContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.SCMListContainerResponseProto; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.List; /** * This class is the server-side translator that forwards requests received on @@ -150,9 +160,9 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB StorageContainerLocationProtocolProtos.NodeQueryRequestProto request) throws ServiceException { try { - EnumSet<HdslProtos.NodeState> nodeStateEnumSet = EnumSet.copyOf(request + EnumSet<HddsProtos.NodeState> nodeStateEnumSet = EnumSet.copyOf(request .getQueryList()); - HdslProtos.NodePool datanodes = impl.queryNode(nodeStateEnumSet, + HddsProtos.NodePool datanodes = impl.queryNode(nodeStateEnumSet, request.getScope(), request.getPoolName()); return StorageContainerLocationProtocolProtos .NodeQueryResponseProto.newBuilder() @@ -185,12 +195,12 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB } @Override - public HdslProtos.GetScmInfoRespsonseProto getScmInfo( - RpcController controller, HdslProtos.GetScmInfoRequestProto req) + public HddsProtos.GetScmInfoRespsonseProto getScmInfo( + RpcController controller, HddsProtos.GetScmInfoRequestProto req) throws ServiceException { try { ScmInfo scmInfo = impl.getScmInfo(); - return HdslProtos.GetScmInfoRespsonseProto.newBuilder() + return HddsProtos.GetScmInfoRespsonseProto.newBuilder() .setClusterId(scmInfo.getClusterId()) .setScmId(scmInfo.getScmId()) .build(); diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java index 860386d9fd..860386d9fd 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java index 909873f1d1..af56da394c 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/web/utils/JsonUtils.java @@ -18,14 +18,14 @@ package org.apache.hadoop.ozone.web.utils; -import java.io.IOException; -import java.util.List; - import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.type.CollectionType; +import java.io.IOException; +import java.util.List; + /** * JSON Utility functions used in ozone. 
*/ diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java index 2ff4e5562f..431da64094 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundService.java @@ -24,16 +24,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.List; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.Executors; -import java.util.concurrent.ExecutorService; import java.util.concurrent.CompletionService; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * An abstract class for a background service in ozone. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java index 47e8ebc98f..47e8ebc98f 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTask.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java index b56ef0c804..b56ef0c804 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskQueue.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java index 198300fb5f..198300fb5f 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BackgroundTaskResult.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java index 47699ebba8..47699ebba8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/BatchOperation.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java index c407398611..c407398611 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/EntryConsumer.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java index 72ac8d16f7..83ca83d80d 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java +++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java @@ -21,21 +21,21 @@ package org.apache.hadoop.utils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.fusesource.leveldbjni.JniDBFactory; -import org.iq80.leveldb.WriteBatch; import org.iq80.leveldb.DB; -import org.iq80.leveldb.Options; -import org.iq80.leveldb.WriteOptions; import org.iq80.leveldb.DBIterator; -import org.iq80.leveldb.Snapshot; +import org.iq80.leveldb.Options; import org.iq80.leveldb.ReadOptions; +import org.iq80.leveldb.Snapshot; +import org.iq80.leveldb.WriteBatch; +import org.iq80.leveldb.WriteOptions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; -import java.util.List; import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Map.Entry; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java index 3ff0a948a8..3ff0a948a8 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java index b90b08f658..b90b08f658 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStore.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java index 095e718778..9e9c32ae56 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataStoreBuilder.java @@ -20,23 +20,24 @@ package org.apache.hadoop.utils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfigKeys; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_ROCKSDB_STATISTICS; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; import org.iq80.leveldb.Options; import org.rocksdb.BlockBasedTableConfig; +import org.rocksdb.Statistics; +import org.rocksdb.StatsLevel; import java.io.File; import java.io.IOException; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; - -import org.rocksdb.Statistics; -import org.rocksdb.StatsLevel; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_METADATA_STORE_IMPL_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_METADATA_STORE_IMPL_ROCKSDB; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_METADATA_STORE_ROCKSDB_STATISTICS; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; /** * Builder for metadata store. 
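The MetadataStoreBuilder hunk above only regroups the static imports of the LevelDB/RocksDB configuration keys. A rough usage sketch, not part of this commit, of how a store implementation is selected through those keys; the builder's fluent method names and the MetadataStoreExample class are assumptions based on the surrounding Ozone utils code, not something this patch introduces.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.utils.MetadataStore;
import org.apache.hadoop.utils.MetadataStoreBuilder;

public class MetadataStoreExample {
  public static MetadataStore openRocksDbStore(File dbFile) throws IOException {
    Configuration conf = new OzoneConfiguration();
    // Select the RocksDB implementation through the same key family the
    // builder's static imports above refer to.
    conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB);
    return MetadataStoreBuilder.newBuilder()   // assumed fluent API
        .setConf(conf)
        .setCreateIfMissing(true)
        .setDbFile(dbFile)
        .build();
  }
}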
diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java index 2f340a534b..a60e98d9ab 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java @@ -23,25 +23,25 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.ratis.shaded.com.google.common.annotations.VisibleForTesting; -import org.rocksdb.RocksIterator; +import org.rocksdb.DbPath; import org.rocksdb.Options; -import org.rocksdb.WriteOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; import org.rocksdb.WriteBatch; -import org.rocksdb.DbPath; +import org.rocksdb.WriteOptions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.management.ObjectName; import java.io.File; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import java.util.List; +import java.util.AbstractMap; import java.util.ArrayList; import java.util.Arrays; -import java.util.AbstractMap; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * RocksDB implementation of ozone metadata store. diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java index 8eb0e4b9cc..88c093e62b 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStoreMBean.java @@ -23,7 +23,15 @@ import org.rocksdb.HistogramType; import org.rocksdb.Statistics; import org.rocksdb.TickerType; -import javax.management.*; +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.DynamicMBean; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.ReflectionException; import java.lang.reflect.Method; import java.util.ArrayList; import java.util.HashSet; diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java index 4466337912..4466337912 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/utils/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java index 89a1cb54f7..3a55831364 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/RatisHelper.java @@ -18,9 +18,9 @@ package org.apache.ratis; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.ratis.client.RaftClient; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; 
@@ -34,11 +34,11 @@ import org.apache.ratis.util.SizeInBytes; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; -import java.util.Collections; -import java.util.Collection; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; import java.util.stream.Collectors; /** diff --git a/hadoop-hdsl/common/src/main/java/org/apache/ratis/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java index c13c20c606..c13c20c606 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/ratis/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/package-info.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java index 29242ad99f..29242ad99f 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/ShadedProtoUtil.java diff --git a/hadoop-hdsl/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java index 032dd9634b..032dd9634b 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java +++ b/hadoop-hdds/common/src/main/java/org/apache/ratis/shaded/com/google/protobuf/package-info.java diff --git a/hadoop-hdsl/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto index 3060ada59c..a6270eff50 100644 --- a/hadoop-hdsl/common/src/main/proto/DatanodeContainerProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto @@ -24,12 +24,12 @@ // This file contains protocol buffers that are used to transfer data // to and from the datanode. -option java_package = "org.apache.hadoop.hdsl.protocol.proto"; +option java_package = "org.apache.hadoop.hdds.protocol.proto"; option java_outer_classname = "ContainerProtos"; option java_generate_equals_and_hash = true; -package hadoop.hdsl; +package hadoop.hdds; import "hdfs.proto"; -import "hdsl.proto"; +import "hdds.proto"; /** * Commands that are used to manipulate the state of containers on a datanode. diff --git a/hadoop-hdsl/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto index f6805d9a73..38d2e16ce8 100644 --- a/hadoop-hdsl/common/src/main/proto/ScmBlockLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto @@ -22,14 +22,14 @@ * for what changes are allowed for a *unstable* .proto interface. 
*/ -option java_package = "org.apache.hadoop.hdsl.protocol.proto"; +option java_package = "org.apache.hadoop.hdds.protocol.proto"; option java_outer_classname = "ScmBlockLocationProtocolProtos"; option java_generic_services = true; option java_generate_equals_and_hash = true; -package hadoop.hdsl; +package hadoop.hdds; import "hdfs.proto"; -import "hdsl.proto"; +import "hdds.proto"; // SCM Block protocol @@ -53,7 +53,7 @@ message GetScmBlockLocationsResponseProto { */ message ScmLocatedBlockProto { required string key = 1; - required hadoop.hdsl.Pipeline pipeline = 2; + required hadoop.hdds.Pipeline pipeline = 2; } /** @@ -62,7 +62,7 @@ message ScmLocatedBlockProto { message AllocateScmBlockRequestProto { required uint64 size = 1; required ReplicationType type = 2; - required hadoop.hdsl.ReplicationFactor factor = 3; + required hadoop.hdds.ReplicationFactor factor = 3; required string owner = 4; } @@ -127,7 +127,7 @@ message AllocateScmBlockResponseProto { } required Error errorCode = 1; required string key = 2; - required hadoop.hdsl.Pipeline pipeline = 3; + required hadoop.hdds.Pipeline pipeline = 3; required bool createContainer = 4; optional string errorMessage = 5; } @@ -161,6 +161,6 @@ service ScmBlockLocationProtocolService { /** * Gets the scmInfo from SCM. */ - rpc getScmInfo(hadoop.hdsl.GetScmInfoRequestProto) - returns (hadoop.hdsl.GetScmInfoRespsonseProto); + rpc getScmInfo(hadoop.hdds.GetScmInfoRequestProto) + returns (hadoop.hdds.GetScmInfoRespsonseProto); } diff --git a/hadoop-hdsl/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto index 795e37c6e5..d7540a3fe4 100644 --- a/hadoop-hdsl/common/src/main/proto/StorageContainerLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto @@ -22,14 +22,14 @@ * for what changes are allowed for a *unstable* .proto interface. */ -option java_package = "org.apache.hadoop.hdsl.protocol.proto"; +option java_package = "org.apache.hadoop.hdds.protocol.proto"; option java_outer_classname = "StorageContainerLocationProtocolProtos"; option java_generic_services = true; option java_generate_equals_and_hash = true; -package hadoop.hdsl; +package hadoop.hdds; import "hdfs.proto"; -import "hdsl.proto"; +import "hdds.proto"; /** * Request send to SCM asking where the container should be created. diff --git a/hadoop-hdsl/common/src/main/proto/hdsl.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index a4baa9797b..f7b2f72b48 100644 --- a/hadoop-hdsl/common/src/main/proto/hdsl.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -22,11 +22,11 @@ * for what changes are allowed for a *unstable* .proto interface. 
*/ -option java_package = "org.apache.hadoop.hdsl.protocol.proto"; -option java_outer_classname = "HdslProtos"; +option java_package = "org.apache.hadoop.hdds.protocol.proto"; +option java_outer_classname = "HddsProtos"; option java_generic_services = true; option java_generate_equals_and_hash = true; -package hadoop.hdsl; +package hadoop.hdds; message DatanodeDetailsProto { // TODO: make the port as a seperate proto message and use it here diff --git a/hadoop-hdsl/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 9feadcf7b0..8018d294d1 100644 --- a/hadoop-hdsl/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -535,7 +535,7 @@ <property> <name>ozone.scm.container.placement.impl</name> <value> - org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementRandom + org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom </value> <tag>OZONE, MANAGEMENT</tag> <description>Placement policy class for containers. @@ -995,7 +995,7 @@ <property> - <name>hdsl.rest.rest-csrf.enabled</name> + <name>hdds.rest.rest-csrf.enabled</name> <value>false</value> <description> If true, then enables Object Store REST server protection against @@ -1004,7 +1004,7 @@ </property> <property> - <name>hdsl.rest.http-address</name> + <name>hdds.rest.http-address</name> <value>0.0.0.0:9880</value> <description>The http address of Object Store REST server inside the datanode.</description> @@ -1012,7 +1012,7 @@ <property> - <name>hdsl.rest.netty.high.watermark</name> + <name>hdds.rest.netty.high.watermark</name> <value>65535</value> <description> High watermark configuration to Netty for Object Store REST server. @@ -1020,7 +1020,7 @@ </property> <property> - <name>hdsl.rest.netty.low.watermark</name> + <name>hdds.rest.netty.low.watermark</name> <value>32768</value> <description> Low watermark configuration to Netty for Object Store REST server. diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/scm/TestArchive.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java index 86adddb9ce..f53f770fcc 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/scm/TestArchive.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/scm/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java index 9c480d6fff..796694171f 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/scm/package-info.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.scm; +package org.apache.hadoop.hdds.scm; /** Test cases for SCM client classes. */
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java index cee66cae16..6b26b60350 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestMetadataStore.java @@ -21,36 +21,36 @@ import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtilClient; -import org.apache.hadoop.utils.BatchOperation; -import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; +import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; -import org.junit.Rule; -import org.junit.Before; import org.junit.After; -import org.junit.Test; import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; -import java.util.List; -import java.util.Map; import java.util.ArrayList; -import java.util.UUID; -import java.util.Collection; import java.util.Arrays; +import java.util.Collection; import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; -import static org.junit.runners.Parameterized.*; +import static org.junit.runners.Parameterized.Parameters; /** * Test class for ozone metadata store. 
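The TestMetadataStore hunk above only rearranges imports for the hdsl-to-hdds package move; the MetadataStore API the test drives is unchanged. As a point of reference, basic usage under the new package names looks roughly like the sketch below. The builder setters are not visible in this hunk, so their exact names are an assumption, and the database path is invented for illustration.

    import java.io.File;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.utils.MetadataStore;
    import org.apache.hadoop.utils.MetadataStoreBuilder;

    public class MetadataStoreSketch {
      public static void main(String[] args) throws Exception {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Build a store backed by a local directory (path is illustrative only).
        MetadataStore store = MetadataStoreBuilder.newBuilder()
            .setConf(conf)
            .setCreateIfMissing(true)
            .setDbFile(new File("/tmp/metadata-sketch"))
            .build();
        store.put(DFSUtil.string2Bytes("key-1"), DFSUtil.string2Bytes("value-1"));
        byte[] value = store.get(DFSUtil.string2Bytes("key-1"));
        System.out.println(DFSUtil.bytes2String(value)); // value-1
        store.close();
      }
    }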
diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 03c45c5019..03c45c5019 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java index d4c626e9f5..c1470bb2ef 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java @@ -18,7 +18,8 @@ package org.apache.hadoop.ozone.common; import org.apache.commons.collections.SetUtils; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; +import org.apache.hadoop.ozone.common.statemachine + .InvalidStateTransitionException; import org.apache.hadoop.ozone.common.statemachine.StateMachine; import org.junit.Assert; import org.junit.Rule; @@ -28,12 +29,13 @@ import org.junit.rules.ExpectedException; import java.util.HashSet; import java.util.Set; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.INIT; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CREATING; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.OPERATIONAL; -import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLOSED; import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLEANUP; +import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CLOSED; +import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.CREATING; import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.FINAL; +import static org.apache.hadoop.ozone.common.TestStateMachine.STATES.INIT; +import static org.apache.hadoop.ozone.common.TestStateMachine.STATES + .OPERATIONAL; /** * This class is to test ozone common state machine. 
diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java index 517c1a7c47..517c1a7c47 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java index 1071309c73..1071309c73 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/package-info.java diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java index 0030d2e9e1..0030d2e9e1 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/ozone/package-info.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/package-info.java diff --git a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java index e4f00f9dfe..a7ce60bbeb 100644 --- a/hadoop-hdsl/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/utils/TestRocksDBStoreMBean.java @@ -18,7 +18,7 @@ package org.apache.hadoop.utils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; diff --git a/hadoop-hdsl/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 7d6d543396..736272d1a4 100644 --- a/hadoop-hdsl/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -19,29 +19,29 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl</artifactId> + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> </parent> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop HDSL Container server</description> - <name>Apache Hadoop HDSL Container server</name> + <description>Apache HDDS Container server</description> + <name>Apache HDDS Container server</name> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> <dependencies> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <scope>provided</scope> </dependency> @@ -79,7 +79,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ </param> <param> - ${basedir}/../../hadoop-hdsl/common/src/main/proto/ + 
${basedir}/../../hadoop-hdds/common/src/main/proto/ </param> <param>${basedir}/src/main/proto</param> </imports> diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/scm/HdslServerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java index 21123eba96..956aef26d9 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/scm/HdslServerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java @@ -15,13 +15,12 @@ * the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.base.Optional; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.scm.ScmConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,17 +29,36 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.hdsl.HdslUtils.*; -import static org.apache.hadoop.hdsl.server.ServerUtils.sanitizeUserArgs; -import static org.apache.hadoop.scm.ScmConfigKeys.*; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DEADNODE_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_STALENODE_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdds.HddsUtils.*; +import static org.apache.hadoop.hdds.server.ServerUtils.sanitizeUserArgs; /** - * Hdsl stateless helper functions for server side components. + * Hdds stateless helper functions for server side components. */ -public class HdslServerUtil { +public class HddsServerUtil { private static final Logger LOG = LoggerFactory.getLogger( - HdslServerUtil.class); + HddsServerUtil.class); /** * Retrieve the socket address that should be used by DataNodes to connect diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/scm/VersionInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java index 6bb3a22683..4e52046625 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/scm/VersionInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/VersionInfo.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; /** * This is a class that tracks versions of SCM. 
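The HddsServerUtil and VersionInfo hunks above are pure package moves; the helper style stays the same: read an address or interval key from configuration and turn it into something usable. A minimal sketch of that pattern follows, using the hdds.rest.http-address key and default shown in the ozone-default.xml hunk earlier; the helper name itself is made up for illustration and is not part of HddsServerUtil.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.net.NetUtils;

    public class RestAddressSketch {
      // Hypothetical helper mirroring the lookup style used by HddsServerUtil.
      static InetSocketAddress getRestHttpAddress(Configuration conf) {
        // Key and default value come from ozone-default.xml (see hunk above).
        String addr = conf.getTrimmed("hdds.rest.http-address", "0.0.0.0:9880");
        return NetUtils.createSocketAddr(addr);
      }

      public static void main(String[] args) {
        System.out.println(getRestHttpAddress(new OzoneConfiguration()));
      }
    }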
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/HdslDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 58c1d41424..7213e7e2e3 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/HdslDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -17,33 +17,33 @@ */ package org.apache.hadoop.ozone; -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.util.UUID; - import com.google.common.base.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeServicePlugin; -import org.apache.hadoop.hdsl.HdslUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine .DatanodeStateMachine; -import org.apache.hadoop.scm.ScmConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.util.UUID; + /** - * Datanode service plugin to start the HDSL container services. + * Datanode service plugin to start the HDDS container services. */ -public class HdslDatanodeService implements DataNodeServicePlugin { +public class HddsDatanodeService implements DataNodeServicePlugin { private static final Logger LOG = LoggerFactory.getLogger( - HdslDatanodeService.class); + HddsDatanodeService.class); private final boolean isOzoneEnabled; @@ -51,11 +51,11 @@ public class HdslDatanodeService implements DataNodeServicePlugin { private DatanodeDetails datanodeDetails; private DatanodeStateMachine datanodeStateMachine; - public HdslDatanodeService() { + public HddsDatanodeService() { try { OzoneConfiguration.activate(); this.conf = new OzoneConfiguration(); - this.isOzoneEnabled = HdslUtils.isHdslEnabled(conf); + this.isOzoneEnabled = HddsUtils.isHddsEnabled(conf); if (isOzoneEnabled) { this.datanodeDetails = getDatanodeDetails(conf); String hostname = DataNode.getHostName(conf); @@ -64,7 +64,7 @@ public class HdslDatanodeService implements DataNodeServicePlugin { this.datanodeDetails.setIpAddress(ip); } } catch (IOException e) { - throw new RuntimeException("Can't start the HDSL datanode plugin", e); + throw new RuntimeException("Can't start the HDDS datanode plugin", e); } } @@ -78,7 +78,7 @@ public class HdslDatanodeService implements DataNodeServicePlugin { datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf); datanodeStateMachine.startDaemon(); } catch (IOException e) { - throw new RuntimeException("Can't start the HDSL datanode plugin", e); + throw new RuntimeException("Can't start the HDDS datanode plugin", e); } } } @@ -90,7 +90,7 @@ public class HdslDatanodeService implements DataNodeServicePlugin { */ private static DatanodeDetails getDatanodeDetails(Configuration conf) throws IOException { - String idFilePath = HdslUtils.getDatanodeIdFilePath(conf); + String idFilePath = 
HddsUtils.getDatanodeIdFilePath(conf); if (idFilePath == null || idFilePath.isEmpty()) { LOG.error("A valid file path is needed for config setting {}", ScmConfigKeys.OZONE_SCM_DATANODE_ID); diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java index 180a116057..68bf4421f6 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java @@ -21,12 +21,13 @@ import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; import org.apache.commons.codec.binary.Hex; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,22 +41,22 @@ import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.concurrent.ExecutionException; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.CHECKSUM_MISMATCH; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.CONTAINER_NOT_FOUND; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.INVALID_WRITE_SIZE; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.IO_EXCEPTION; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.OVERWRITE_FLAG_REQUIRED; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.UNABLE_TO_FIND_CHUNK; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.UNABLE_TO_FIND_DATA_DIR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CHECKSUM_MISMATCH; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CONTAINER_INTERNAL_ERROR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .INVALID_WRITE_SIZE; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .IO_EXCEPTION; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .OVERWRITE_FLAG_REQUIRED; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNABLE_TO_FIND_CHUNK; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNABLE_TO_FIND_DATA_DIR; /** * Set of utility functions used by the chunk Manager. 
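ChunkUtils keeps its checksum handling; only the proto package and the static imports of the ContainerProtos.Result codes change. The check it performs amounts to comparing a stored hex digest against a freshly computed SHA-256 of the chunk data, roughly as in the standalone sketch below (the real method operates on a ChunkInfo and reports CHECKSUM_MISMATCH through StorageContainerException rather than a plain IOException).

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.commons.codec.digest.DigestUtils;

    public class ChecksumSketch {
      // Fail if the stored digest does not match the recomputed one.
      static void verifyChecksum(byte[] data, String expectedHex) throws IOException {
        String actualHex = DigestUtils.sha256Hex(data);
        if (!actualHex.equals(expectedHex)) {
          throw new IOException(
              "Checksum mismatch: expected " + expectedHex + " but computed " + actualHex);
        }
      }

      public static void main(String[] args) throws IOException {
        byte[] chunk = "hello".getBytes(StandardCharsets.UTF_8);
        verifyChecksum(chunk, DigestUtils.sha256Hex(chunk)); // passes
      }
    }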
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java index bc28e8f0dc..c29374c07c 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java @@ -20,10 +20,10 @@ package org.apache.hadoop.ozone.container.common.helpers; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.ScmConfigKeys; import org.apache.hadoop.util.Time; import java.io.IOException; @@ -49,7 +49,7 @@ public class ContainerData { private AtomicLong bytesUsed; private long maxSize; private Long containerID; - private HdslProtos.LifeCycleState state; + private HddsProtos.LifeCycleState state; /** * Constructs a ContainerData Object. @@ -64,7 +64,7 @@ public class ContainerData { ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB; this.bytesUsed = new AtomicLong(0L); this.containerID = containerID; - this.state = HdslProtos.LifeCycleState.OPEN; + this.state = HddsProtos.LifeCycleState.OPEN; } /** @@ -135,8 +135,8 @@ public class ContainerData { builder.setState(this.getState()); for (Map.Entry<String, String> entry : metadata.entrySet()) { - HdslProtos.KeyValue.Builder keyValBuilder = - HdslProtos.KeyValue.newBuilder(); + HddsProtos.KeyValue.Builder keyValBuilder = + HddsProtos.KeyValue.newBuilder(); builder.addMetadata(keyValBuilder.setKey(entry.getKey()) .setValue(entry.getValue()).build()); } @@ -259,11 +259,11 @@ public class ContainerData { return containerID; } - public synchronized void setState(HdslProtos.LifeCycleState state) { + public synchronized void setState(HddsProtos.LifeCycleState state) { this.state = state; } - public synchronized HdslProtos.LifeCycleState getState() { + public synchronized HddsProtos.LifeCycleState getState() { return this.state; } @@ -272,7 +272,7 @@ public class ContainerData { * @return - boolean */ public synchronized boolean isOpen() { - return HdslProtos.LifeCycleState.OPEN == state; + return HddsProtos.LifeCycleState.OPEN == state; } /** @@ -280,7 +280,7 @@ public class ContainerData { */ public synchronized void closeContainer() { // TODO: closed or closing here - setState(HdslProtos.LifeCycleState.CLOSED); + setState(HddsProtos.LifeCycleState.CLOSED); // Some thing brain dead for now. name + Time stamp of when we get the close // container message. 
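The ContainerData change above is mechanical: HdslProtos becomes HddsProtos in the lifecycle state and in the metadata conversion inside getProtoBufMessage(). In isolation, that metadata-to-proto loop does the following; the builder calls match the ones visible in the hunk, while the map contents here are invented sample entries.

    import java.util.LinkedHashMap;
    import java.util.Map;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    public class MetadataProtoSketch {
      public static void main(String[] args) {
        Map<String, String> metadata = new LinkedHashMap<>();
        metadata.put("owner", "ozone");   // sample entry, not from the diff
        metadata.put("volume", "vol-1");  // sample entry, not from the diff

        for (Map.Entry<String, String> entry : metadata.entrySet()) {
          HddsProtos.KeyValue kv = HddsProtos.KeyValue.newBuilder()
              .setKey(entry.getKey())
              .setValue(entry.getValue())
              .build();
          System.out.println(kv.getKey() + " -> " + kv.getValue());
        }
      }
    }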
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index 766039efdd..d4d732b8b6 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -21,15 +21,15 @@ package org.apache.hadoop.ozone.container.common.helpers; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MutableRate; /** * diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java index 362ef9b8ac..50d2da3975 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo; /** * Container Report iterates the closed containers and sends a container report diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index 6f115be559..1818188cb6 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -22,12 +22,13 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; import org.slf4j.Logger; @@ -41,10 +42,10 @@ import java.nio.file.Path; import java.nio.file.Paths; import static org.apache.commons.io.FilenameUtils.removeExtension; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.INVALID_ARGUMENT; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.UNABLE_TO_FIND_DATA_DIR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .INVALID_ARGUMENT; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNABLE_TO_FIND_DATA_DIR; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META; @@ -412,7 +413,7 @@ public final class ContainerUtils { } } try (FileOutputStream out = new FileOutputStream(path)) { - HdslProtos.DatanodeDetailsProto proto = + HddsProtos.DatanodeDetailsProto proto = datanodeDetails.getProtoBufMessage(); proto.writeTo(out); } @@ -432,7 +433,7 @@ public final class ContainerUtils { } try(FileInputStream in = new FileInputStream(path)) { return DatanodeDetails.getFromProtoBuf( - HdslProtos.DatanodeDetailsProto.parseFrom(in)); + HddsProtos.DatanodeDetailsProto.parseFrom(in)); } catch (IOException e) { throw new IOException("Failed to parse DatanodeDetails from " + path.getAbsolutePath(), e); diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java index 6fd7f85522..ade162a263 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.collect.Maps; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import java.util.List; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java index 75ec505a3b..566db02510 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; /** * File Utils are helper routines used by putSmallFile and getSmallFile diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java index 691c517d42..33eb911d4e 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java @@ -19,18 +19,19 @@ package org.apache.hadoop.ozone.container.common.helpers; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.utils.MetadataStore; import java.io.IOException; import java.nio.charset.Charset; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.UNABLE_TO_READ_METADATA_DB; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.NO_SUCH_KEY; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .NO_SUCH_KEY; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNABLE_TO_READ_METADATA_DB; /** * Utils functions to help key functions. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java index 21f31e1bbe..21f31e1bbe 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java index 7c950dccf9..b0286b96d4 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java @@ -19,15 +19,16 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.helpers.ContainerData; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils; +import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,10 +40,10 @@ import java.nio.file.StandardCopyOption; import java.security.NoSuchAlgorithmException; import java.util.concurrent.ExecutionException; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.UNSUPPORTED_REQUEST; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CONTAINER_INTERNAL_ERROR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNSUPPORTED_REQUEST; /** * An implementation of ChunkManager that is used by default in ozone. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java new file mode 100644 index 0000000000..5e7375cd9d --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java @@ -0,0 +1,1113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.container.common.impl; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdfs.server.datanode.StorageLocation; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMStorageReport; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.helpers.ContainerData; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.helpers.KeyData; +import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; +import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerDeletionChoosingPolicy; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerLocationManager; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerReportManager; +import org.apache.hadoop.ozone.container.common.interfaces.KeyManager; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.utils.MetadataKeyFilters; +import org.apache.hadoop.utils.MetadataStore; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FilenameFilter; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.DigestInputStream; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CONTAINER_EXISTS; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CONTAINER_INTERNAL_ERROR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .ERROR_IN_COMPACT_DB; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .INVALID_CONFIG; +import static 
org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .IO_EXCEPTION; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .NO_SUCH_ALGORITHM; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNABLE_TO_READ_METADATA_DB; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNCLOSED_CONTAINER_IO; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .UNSUPPORTED_REQUEST; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META; + +/** + * A Generic ContainerManagerImpl that will be called from Ozone + * ContainerManagerImpl. This allows us to support delta changes to ozone + * version without having to rewrite the containerManager. + */ +public class ContainerManagerImpl implements ContainerManager { + static final Logger LOG = + LoggerFactory.getLogger(ContainerManagerImpl.class); + + private final ConcurrentSkipListMap<String, ContainerStatus> + containerMap = new ConcurrentSkipListMap<>(); + + // Use a non-fair RW lock for better throughput, we may revisit this decision + // if this causes fairness issues. + private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private ContainerLocationManager locationManager; + private ChunkManager chunkManager; + private KeyManager keyManager; + private Configuration conf; + private DatanodeDetails datanodeDetails; + + private ContainerDeletionChoosingPolicy containerDeletionChooser; + private ContainerReportManager containerReportManager; + + /** + * Init call that sets up a container Manager. + * + * @param config - Configuration. + * @param containerDirs - List of Metadata Container locations. + * @param dnDetails - DatanodeDetails. + * @throws IOException + */ + @Override + public void init( + Configuration config, List<StorageLocation> containerDirs, + DatanodeDetails dnDetails) throws IOException { + Preconditions.checkNotNull(config, "Config must not be null"); + Preconditions.checkNotNull(containerDirs, "Container directories cannot " + + "be null"); + Preconditions.checkNotNull(dnDetails, "Datanode Details cannot " + + "be null"); + + Preconditions.checkState(containerDirs.size() > 0, "Number of container" + + " directories must be greater than zero."); + + this.conf = config; + this.datanodeDetails = dnDetails; + + readLock(); + try { + containerDeletionChooser = ReflectionUtils.newInstance(conf.getClass( + ScmConfigKeys.OZONE_SCM_CONTAINER_DELETION_CHOOSING_POLICY, + TopNOrderedContainerDeletionChoosingPolicy.class, + ContainerDeletionChoosingPolicy.class), conf); + + for (StorageLocation path : containerDirs) { + File directory = Paths.get(path.getNormalizedUri()).toFile(); + if (!directory.exists() && !directory.mkdirs()) { + LOG.error("Container metadata directory doesn't exist " + + "and cannot be created. Path: {}", path.toString()); + throw new StorageContainerException("Container metadata " + + "directory doesn't exist and cannot be created " + path + .toString(), INVALID_CONFIG); + } + + // TODO: This will fail if any directory is invalid. + // We should fix this to handle invalid directories and continue. + // Leaving it this way to fail fast for time being. + if (!directory.isDirectory()) { + LOG.error("Invalid path to container metadata directory. path: {}", + path.toString()); + throw new StorageContainerException("Invalid path to container " + + "metadata directory." 
+ path, INVALID_CONFIG); + } + LOG.info("Loading containers under {}", path); + File[] files = directory.listFiles(new ContainerFilter()); + if (files != null) { + for (File containerFile : files) { + LOG.debug("Loading container {}", containerFile); + String containerPath = + ContainerUtils.getContainerNameFromFile(containerFile); + Preconditions.checkNotNull(containerPath, "Container path cannot" + + " be null"); + readContainerInfo(containerPath); + } + } + } + + List<StorageLocation> dataDirs = new LinkedList<>(); + for (String dir : config.getStrings(DFS_DATANODE_DATA_DIR_KEY)) { + StorageLocation location = StorageLocation.parse(dir); + dataDirs.add(location); + } + this.locationManager = + new ContainerLocationManagerImpl(containerDirs, dataDirs, config); + + this.containerReportManager = + new ContainerReportManagerImpl(config); + } finally { + readUnlock(); + } + } + + /** + * Reads the Container Info from a file and verifies that checksum match. If + * the checksums match, then that file is added to containerMap. + * + * @param containerName - Name which points to the persisted container. + * @throws StorageContainerException + */ + private void readContainerInfo(String containerName) + throws StorageContainerException { + Preconditions.checkState(containerName.length() > 0, + "Container name length cannot be zero."); + FileInputStream containerStream = null; + DigestInputStream dis = null; + FileInputStream metaStream = null; + Path cPath = Paths.get(containerName).getFileName(); + String keyName = null; + if (cPath != null) { + keyName = cPath.toString(); + } + Preconditions.checkNotNull(keyName, + "Container Name to container key mapping is null"); + + try { + String containerFileName = containerName.concat(CONTAINER_EXTENSION); + String metaFileName = containerName.concat(CONTAINER_META); + + containerStream = new FileInputStream(containerFileName); + + metaStream = new FileInputStream(metaFileName); + + MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); + + dis = new DigestInputStream(containerStream, sha); + + ContainerProtos.ContainerData containerDataProto = + ContainerProtos.ContainerData.parseDelimitedFrom(dis); + ContainerData containerData; + if (containerDataProto == null) { + // Sometimes container metadata might have been created but empty, + // when loading the info we get a null, this often means last time + // SCM was ending up at some middle phase causing that the metadata + // was not populated. Such containers are marked as inactive. + containerMap.put(keyName, new ContainerStatus(null)); + return; + } + containerData = ContainerData.getFromProtBuf(containerDataProto, conf); + ContainerProtos.ContainerMeta meta = + ContainerProtos.ContainerMeta.parseDelimitedFrom(metaStream); + if (meta != null && !DigestUtils.sha256Hex(sha.digest()) + .equals(meta.getHash())) { + // This means we were not able read data from the disk when booted the + // datanode. We are going to rely on SCM understanding that we don't + // have valid data for this container when we send container reports. + // Hopefully SCM will ask us to delete this container and rebuild it. + LOG.error("Invalid SHA found for container data. Name :{}" + + "cowardly refusing to read invalid data", containerName); + containerMap.put(keyName, new ContainerStatus(null)); + return; + } + + ContainerStatus containerStatus = new ContainerStatus(containerData); + // Initialize pending deletion blocks count in in-memory + // container status. 
+ MetadataStore metadata = KeyUtils.getDB(containerData, conf); + List<Map.Entry<byte[], byte[]>> underDeletionBlocks = metadata + .getSequentialRangeKVs(null, Integer.MAX_VALUE, + MetadataKeyFilters.getDeletingKeyFilter()); + containerStatus.incrPendingDeletionBlocks(underDeletionBlocks.size()); + + List<Map.Entry<byte[], byte[]>> liveKeys = metadata + .getRangeKVs(null, Integer.MAX_VALUE, + MetadataKeyFilters.getNormalKeyFilter()); + + // Get container bytesUsed upon loading container + // The in-memory state is updated upon key write or delete + // TODO: update containerDataProto and persist it into container MetaFile + long bytesUsed = 0; + bytesUsed = liveKeys.parallelStream().mapToLong(e-> { + KeyData keyData; + try { + keyData = KeyUtils.getKeyData(e.getValue()); + return keyData.getSize(); + } catch (IOException ex) { + return 0L; + } + }).sum(); + containerStatus.setBytesUsed(bytesUsed); + + containerMap.put(keyName, containerStatus); + } catch (IOException | NoSuchAlgorithmException ex) { + LOG.error("read failed for file: {} ex: {}", containerName, + ex.getMessage()); + + // TODO : Add this file to a recovery Queue. + + // Remember that this container is busted and we cannot use it. + containerMap.put(keyName, new ContainerStatus(null)); + throw new StorageContainerException("Unable to read container info", + UNABLE_TO_READ_METADATA_DB); + } finally { + IOUtils.closeStream(dis); + IOUtils.closeStream(containerStream); + IOUtils.closeStream(metaStream); + } + } + + /** + * Creates a container with the given name. + * + * @param pipeline -- Nodes which make up this container. + * @param containerData - Container Name and metadata. + * @throws StorageContainerException - Exception + */ + @Override + public void createContainer(Pipeline pipeline, ContainerData containerData) + throws StorageContainerException { + Preconditions.checkNotNull(containerData, "Container data cannot be null"); + writeLock(); + try { + if (containerMap.containsKey(containerData.getName())) { + LOG.debug("container already exists. {}", containerData.getName()); + throw new StorageContainerException("container already exists.", + CONTAINER_EXISTS); + } + + // This is by design. We first write and close the + // container Info and metadata to a directory. + // Then read back and put that info into the containerMap. + // This allows us to make sure that our write is consistent. + + writeContainerInfo(containerData, false); + File cFile = new File(containerData.getContainerPath()); + readContainerInfo(ContainerUtils.getContainerNameFromFile(cFile)); + } catch (NoSuchAlgorithmException ex) { + LOG.error("Internal error: We seem to be running a JVM without a " + + "needed hash algorithm."); + throw new StorageContainerException("failed to create container", + NO_SUCH_ALGORITHM); + } finally { + writeUnlock(); + } + + } + + /** + * Writes a container to a chosen location and updates the container Map. + * + * The file formats of ContainerData and Container Meta is the following. + * + * message ContainerData { + * required string name = 1; + * repeated KeyValue metadata = 2; + * optional string dbPath = 3; + * optional string containerPath = 4; + * optional int64 bytesUsed = 5; + * optional int64 size = 6; + * } + * + * message ContainerMeta { + * required string fileName = 1; + * required string hash = 2; + * } + * + * @param containerData - container Data + * @param overwrite - Whether we are overwriting. 
+ * @throws StorageContainerException, NoSuchAlgorithmException + */ + private void writeContainerInfo(ContainerData containerData, + boolean overwrite) + throws StorageContainerException, NoSuchAlgorithmException { + + Preconditions.checkNotNull(this.locationManager, + "Internal error: location manager cannot be null"); + + FileOutputStream containerStream = null; + DigestOutputStream dos = null; + FileOutputStream metaStream = null; + + try { + Path metadataPath = null; + Path location = (!overwrite) ? locationManager.getContainerPath(): + Paths.get(containerData.getContainerPath()).getParent(); + if (location == null) { + throw new StorageContainerException( + "Failed to get container file path.", + CONTAINER_INTERNAL_ERROR); + } + + File containerFile = ContainerUtils.getContainerFile(containerData, + location); + File metadataFile = ContainerUtils.getMetadataFile(containerData, + location); + String containerName = containerData.getContainerName(); + + if(!overwrite) { + ContainerUtils.verifyIsNewContainer(containerFile, metadataFile); + metadataPath = this.locationManager.getDataPath(containerName); + metadataPath = ContainerUtils.createMetadata(metadataPath, + containerName, conf); + } else { + metadataPath = ContainerUtils.getMetadataDirectory(containerData); + } + + containerStream = new FileOutputStream(containerFile); + metaStream = new FileOutputStream(metadataFile); + MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); + + dos = new DigestOutputStream(containerStream, sha); + containerData.setDBPath(metadataPath.resolve( + ContainerUtils.getContainerDbFileName(containerName)) + .toString()); + containerData.setContainerPath(containerFile.toString()); + + ContainerProtos.ContainerData protoData = containerData + .getProtoBufMessage(); + protoData.writeDelimitedTo(dos); + + ContainerProtos.ContainerMeta protoMeta = ContainerProtos + .ContainerMeta.newBuilder() + .setFileName(containerFile.toString()) + .setHash(DigestUtils.sha256Hex(sha.digest())) + .build(); + protoMeta.writeDelimitedTo(metaStream); + + } catch (IOException ex) { + // TODO : we need to clean up partially constructed files + // The proper way to do would be for a thread + // to read all these 3 artifacts and make sure they are + // sane. That info needs to come from the replication + // pipeline, and if not consistent delete these file. + + // In case of ozone this is *not* a deal breaker since + // SCM is guaranteed to generate unique container names. + // The saving grace is that we check if we have residue files + // lying around when creating a new container. We need to queue + // this information to a cleaner thread. + + LOG.error("Creation of container failed. Name: {}, we might need to " + + "cleanup partially created artifacts. ", + containerData.getContainerName(), ex); + throw new StorageContainerException("Container creation failed. ", + ex, CONTAINER_INTERNAL_ERROR); + } finally { + IOUtils.closeStream(dos); + IOUtils.closeStream(containerStream); + IOUtils.closeStream(metaStream); + } + } + + /** + * Deletes an existing container. + * + * @param pipeline - nodes that make this container. + * @param containerName - name of the container. + * @param forceDelete - whether this container should be deleted forcibly. 
+ * @throws StorageContainerException + */ + @Override + public void deleteContainer(Pipeline pipeline, String containerName, + boolean forceDelete) throws StorageContainerException { + Preconditions.checkNotNull(containerName, "Container name cannot be null"); + Preconditions.checkState(containerName.length() > 0, + "Container name length cannot be zero."); + writeLock(); + try { + if (isOpen(pipeline.getContainerName())) { + throw new StorageContainerException( + "Deleting an open container is not allowed.", + UNCLOSED_CONTAINER_IO); + } + + ContainerStatus status = containerMap.get(containerName); + if (status == null) { + LOG.debug("No such container. Name: {}", containerName); + throw new StorageContainerException("No such container. Name : " + + containerName, CONTAINER_NOT_FOUND); + } + if (status.getContainer() == null) { + LOG.debug("Invalid container data. Name: {}", containerName); + throw new StorageContainerException("Invalid container data. Name : " + + containerName, CONTAINER_NOT_FOUND); + } + ContainerUtils.removeContainer(status.getContainer(), conf, forceDelete); + containerMap.remove(containerName); + } catch (StorageContainerException e) { + throw e; + } catch (IOException e) { + // TODO : An I/O error during delete can leave partial artifacts on the + // disk. We will need the cleaner thread to cleanup this information. + LOG.error("Failed to cleanup container. Name: {}", containerName, e); + throw new StorageContainerException(containerName, e, IO_EXCEPTION); + } finally { + writeUnlock(); + } + } + + /** + * A simple interface for container Iterations. + * <p/> + * This call make no guarantees about consistency of the data between + * different list calls. It just returns the best known data at that point of + * time. It is possible that using this iteration you can miss certain + * container from the listing. + * + * @param prefix - Return keys that match this prefix. + * @param count - how many to return + * @param prevKey - Previous Key Value or empty String. + * @param data - Actual containerData + * @throws StorageContainerException + */ + @Override + public void listContainer(String prefix, long count, String prevKey, + List<ContainerData> data) throws StorageContainerException { + // TODO : Support list with Prefix and PrevKey + Preconditions.checkNotNull(data, + "Internal assertion: data cannot be null"); + readLock(); + try { + ConcurrentNavigableMap<String, ContainerStatus> map; + if (prevKey == null || prevKey.isEmpty()) { + map = containerMap.tailMap(containerMap.firstKey(), true); + } else { + map = containerMap.tailMap(prevKey, false); + } + + int currentCount = 0; + for (ContainerStatus entry : map.values()) { + if (currentCount < count) { + data.add(entry.getContainer()); + currentCount++; + } else { + return; + } + } + } finally { + readUnlock(); + } + } + + /** + * Get metadata about a specific container. + * + * @param containerName - Name of the container + * @return ContainerData - Container Data. + * @throws StorageContainerException + */ + @Override + public ContainerData readContainer(String containerName) throws + StorageContainerException { + Preconditions.checkNotNull(containerName, "Container name cannot be null"); + Preconditions.checkState(containerName.length() > 0, + "Container name length cannot be zero."); + if (!containerMap.containsKey(containerName)) { + throw new StorageContainerException("Unable to find the container. 
Name: " + + containerName, CONTAINER_NOT_FOUND); + } + ContainerData cData = containerMap.get(containerName).getContainer(); + if (cData == null) { + throw new StorageContainerException("Invalid container data. Name: " + + containerName, CONTAINER_INTERNAL_ERROR); + } + return cData; + } + + /** + * Closes a open container, if it is already closed or does not exist a + * StorageContainerException is thrown. + * + * @param containerName - Name of the container. + * @throws StorageContainerException + */ + @Override + public void closeContainer(String containerName) + throws StorageContainerException, NoSuchAlgorithmException { + ContainerData containerData = readContainer(containerName); + containerData.closeContainer(); + writeContainerInfo(containerData, true); + MetadataStore db = KeyUtils.getDB(containerData, conf); + + // It is ok if this operation takes a bit of time. + // Close container is not expected to be instantaneous. + try { + db.compactDB(); + } catch (IOException e) { + LOG.error("Error in DB compaction while closing container", e); + throw new StorageContainerException(e, ERROR_IN_COMPACT_DB); + } + + // Active is different from closed. Closed means it is immutable, active + // false means we have some internal error that is happening to this + // container. This is a way to track damaged containers if we have an + // I/O failure, this allows us to take quick action in case of container + // issues. + + ContainerStatus status = new ContainerStatus(containerData); + containerMap.put(containerName, status); + } + + @Override + public void updateContainer(Pipeline pipeline, String containerName, + ContainerData data, boolean forceUpdate) + throws StorageContainerException { + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + Preconditions.checkNotNull(containerName, "Container name cannot be null"); + Preconditions.checkNotNull(data, "Container data cannot be null"); + FileOutputStream containerStream = null; + DigestOutputStream dos = null; + MessageDigest sha = null; + File containerFileBK = null, containerFile = null; + boolean deleted = false; + + if(!containerMap.containsKey(containerName)) { + throw new StorageContainerException("Container doesn't exist. Name :" + + containerName, CONTAINER_NOT_FOUND); + } + + try { + sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH); + } catch (NoSuchAlgorithmException e) { + throw new StorageContainerException("Unable to create Message Digest," + + " usually this is a java configuration issue.", + NO_SUCH_ALGORITHM); + } + + try { + Path location = locationManager.getContainerPath(); + ContainerData orgData = containerMap.get(containerName).getContainer(); + if (orgData == null) { + // updating a invalid container + throw new StorageContainerException("Update a container with invalid" + + "container meta data", CONTAINER_INTERNAL_ERROR); + } + + if (!forceUpdate && !orgData.isOpen()) { + throw new StorageContainerException( + "Update a closed container is not allowed. Name: " + containerName, + UNSUPPORTED_REQUEST); + } + + containerFile = ContainerUtils.getContainerFile(orgData, location); + // If forceUpdate is true, there is no need to check + // whether the container file exists. + if (!forceUpdate) { + if (!containerFile.exists() || !containerFile.canWrite()) { + throw new StorageContainerException( + "Container file not exists or corrupted. 
Name: " + containerName, + CONTAINER_INTERNAL_ERROR); + } + + // Backup the container file + containerFileBK = File.createTempFile( + "tmp_" + System.currentTimeMillis() + "_", + containerFile.getName(), containerFile.getParentFile()); + FileUtils.copyFile(containerFile, containerFileBK); + + deleted = containerFile.delete(); + containerStream = new FileOutputStream(containerFile); + dos = new DigestOutputStream(containerStream, sha); + + ContainerProtos.ContainerData protoData = data.getProtoBufMessage(); + protoData.writeDelimitedTo(dos); + } + + // Update the in-memory map + ContainerStatus newStatus = new ContainerStatus(data); + containerMap.replace(containerName, newStatus); + } catch (IOException e) { + // Restore the container file from backup + if(containerFileBK != null && containerFileBK.exists() && deleted) { + if(containerFile.delete() + && containerFileBK.renameTo(containerFile)) { + throw new StorageContainerException("Container update failed," + + " container data restored from the backup.", + CONTAINER_INTERNAL_ERROR); + } else { + throw new StorageContainerException( + "Failed to restore container data from the backup. Name: " + + containerName, CONTAINER_INTERNAL_ERROR); + } + } else { + throw new StorageContainerException( + e.getMessage(), CONTAINER_INTERNAL_ERROR); + } + } finally { + if (containerFileBK != null && containerFileBK.exists()) { + if(!containerFileBK.delete()) { + LOG.warn("Unable to delete container file backup: {}.", + containerFileBK.getAbsolutePath()); + } + } + IOUtils.closeStream(dos); + IOUtils.closeStream(containerStream); + } + } + + @VisibleForTesting + protected File getContainerFile(ContainerData data) throws IOException { + return ContainerUtils.getContainerFile(data, + this.locationManager.getContainerPath()); + } + + /** + * Checks if a container is open. + * + * @param containerName - Name of the container. + * @return true if the container is open, false otherwise. + * @throws StorageContainerException - Throws an exception if we are not able to + * find the container. + */ + @Override + public boolean isOpen(String containerName) throws StorageContainerException { + final ContainerStatus status = containerMap.get(containerName); + if (status == null) { + throw new StorageContainerException( + "Container status not found: " + containerName, CONTAINER_NOT_FOUND); + } + final ContainerData cData = status.getContainer(); + if (cData == null) { + throw new StorageContainerException( + "Container not found: " + containerName, CONTAINER_NOT_FOUND); + } + return cData.isOpen(); + } + + /** + * Supports clean shutdown of containers. + * + * @throws IOException + */ + @Override + public void shutdown() throws IOException { + Preconditions.checkState(this.hasWriteLock(), + "Assumption that we are holding the lock violated."); + this.containerMap.clear(); + this.locationManager.shutdown(); + } + + + @VisibleForTesting + public ConcurrentSkipListMap<String, ContainerStatus> getContainerMap() { + return containerMap; + } + + /** + * Acquire read lock. + */ + @Override + public void readLock() { + this.lock.readLock().lock(); + + } + + @Override + public void readLockInterruptibly() throws InterruptedException { + this.lock.readLock().lockInterruptibly(); + } + + /** + * Release read lock. + */ + @Override + public void readUnlock() { + this.lock.readLock().unlock(); + } + + /** + * Check if the current thread holds the read lock. + */ + @Override + public boolean hasReadLock() { + return this.lock.readLock().tryLock(); + } + + /** + * Acquire write lock. 
+ */ + @Override + public void writeLock() { + this.lock.writeLock().lock(); + } + + /** + * Acquire write lock, unless interrupted while waiting. + */ + @Override + public void writeLockInterruptibly() throws InterruptedException { + this.lock.writeLock().lockInterruptibly(); + + } + + /** + * Release write lock. + */ + @Override + public void writeUnlock() { + this.lock.writeLock().unlock(); + + } + + /** + * Check if the current thread holds the write lock. + */ + @Override + public boolean hasWriteLock() { + return this.lock.writeLock().isHeldByCurrentThread(); + } + + public ChunkManager getChunkManager() { + return this.chunkManager; + } + + /** + * Sets the chunk manager. + * + * @param chunkManager - Chunk Manager + */ + public void setChunkManager(ChunkManager chunkManager) { + this.chunkManager = chunkManager; + } + + /** + * Gets the Key Manager. + * + * @return KeyManager. + */ + @Override + public KeyManager getKeyManager() { + return this.keyManager; + } + + /** + * Get the node report. + * @return node report. + */ + @Override + public SCMNodeReport getNodeReport() throws IOException { + StorageLocationReport[] reports = locationManager.getLocationReport(); + SCMNodeReport.Builder nrb = SCMNodeReport.newBuilder(); + for (int i = 0; i < reports.length; i++) { + SCMStorageReport.Builder srb = SCMStorageReport.newBuilder(); + nrb.addStorageReport(i, srb.setStorageUuid(reports[i].getId()) + .setCapacity(reports[i].getCapacity()) + .setScmUsed(reports[i].getScmUsed()) + .setRemaining(reports[i].getRemaining()) + .build()); + } + return nrb.build(); + } + + + /** + * Gets container reports. + * + * @return List of all closed containers. + * @throws IOException + */ + @Override + public List<ContainerData> getContainerReports() throws IOException { + LOG.debug("Starting container report iteration."); + // No need for locking since containerMap is a ConcurrentSkipListMap, + // and we can never get the exact state since a close might happen + // after we iterate past a point. + return containerMap.entrySet().stream() + .filter(containerStatus -> + !containerStatus.getValue().getContainer().isOpen()) + .map(containerStatus -> containerStatus.getValue().getContainer()) + .collect(Collectors.toList()); + } + + /** + * Get container report. + * + * @return The container report. + * @throws IOException + */ + @Override + public ContainerReportsRequestProto getContainerReport() throws IOException { + LOG.debug("Starting container report iteration."); + // No need for locking since containerMap is a ConcurrentSkipListMap, + // and we can never get the exact state since a close might happen + // after we iterate past a point. 
+ List<ContainerStatus> containers = containerMap.values().stream() + .collect(Collectors.toList()); + + ContainerReportsRequestProto.Builder crBuilder = + ContainerReportsRequestProto.newBuilder(); + + // TODO: support delta-based container report + crBuilder.setDatanodeDetails(datanodeDetails.getProtoBufMessage()) + .setType(ContainerReportsRequestProto.reportType.fullReport); + + for (ContainerStatus container: containers) { + StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder = + StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder(); + ciBuilder.setContainerName(container.getContainer().getContainerName()) + .setSize(container.getContainer().getMaxSize()) + .setUsed(container.getContainer().getBytesUsed()) + .setKeyCount(container.getContainer().getKeyCount()) + .setReadCount(container.getReadCount()) + .setWriteCount(container.getWriteCount()) + .setReadBytes(container.getReadBytes()) + .setWriteBytes(container.getWriteBytes()) + .setContainerID(container.getContainer().getContainerID()); + + if (container.getContainer().getHash() != null) { + ciBuilder.setFinalhash(container.getContainer().getHash()); + } + crBuilder.addReports(ciBuilder.build()); + } + + return crBuilder.build(); + } + + /** + * Sets the Key Manager. + * + * @param keyManager - Key Manager. + */ + @Override + public void setKeyManager(KeyManager keyManager) { + this.keyManager = keyManager; + } + + /** + * Filter out only container files from the container metadata dir. + */ + private static class ContainerFilter implements FilenameFilter { + /** + * Tests if a specified file should be included in a file list. + * + * @param dir the directory in which the file was found. + * @param name the name of the file. + * @return <code>true</code> if and only if the name should be included in + * the file list; <code>false</code> otherwise. + */ + @Override + public boolean accept(File dir, String name) { + return name.endsWith(CONTAINER_EXTENSION); + } + } + + @Override + public List<ContainerData> chooseContainerForBlockDeletion( + int count) throws StorageContainerException { + readLock(); + try { + return containerDeletionChooser.chooseContainerForBlockDeletion( + count, containerMap); + } finally { + readUnlock(); + } + } + + @VisibleForTesting + public ContainerDeletionChoosingPolicy getContainerDeletionChooser() { + return containerDeletionChooser; + } + + @Override + public void incrPendingDeletionBlocks(int numBlocks, String containerId) { + writeLock(); + try { + ContainerStatus status = containerMap.get(containerId); + status.incrPendingDeletionBlocks(numBlocks); + } finally { + writeUnlock(); + } + } + + @Override + public void decrPendingDeletionBlocks(int numBlocks, String containerId) { + writeLock(); + try { + ContainerStatus status = containerMap.get(containerId); + status.decrPendingDeletionBlocks(numBlocks); + } finally { + writeUnlock(); + } + } + + /** + * Increase the read count of the container. + * + * @param containerName - Name of the container. + */ + @Override + public void incrReadCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); + status.incrReadCount(); + } + + public long getReadCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); + return status.getReadCount(); + } + + /** + * Increase the read counter for bytes read from the container. + * + * @param containerName - Name of the container. + * @param readBytes - bytes read from the container. 
+ */ + @Override + public void incrReadBytes(String containerName, long readBytes) { + ContainerStatus status = containerMap.get(containerName); + status.incrReadBytes(readBytes); + } + + public long getReadBytes(String containerName) { + readLock(); + try { + ContainerStatus status = containerMap.get(containerName); + return status.getReadBytes(); + } finally { + readUnlock(); + } + } + + /** + * Increase the write count of the container. + * + * @param containerName - Name of the container. + */ + @Override + public void incrWriteCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); + status.incrWriteCount(); + } + + public long getWriteCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); + return status.getWriteCount(); + } + + /** + * Increase the write counter for bytes written into the container. + * + * @param containerName - Name of the container. + * @param writeBytes - bytes written into the container. + */ + @Override + public void incrWriteBytes(String containerName, long writeBytes) { + ContainerStatus status = containerMap.get(containerName); + status.incrWriteBytes(writeBytes); + } + + public long getWriteBytes(String containerName) { + ContainerStatus status = containerMap.get(containerName); + return status.getWriteBytes(); + } + + /** + * Increase the bytes used by the container. + * + * @param containerName - Name of the container. + * @param used - additional bytes used by the container. + * @return the current bytes used. + */ + @Override + public long incrBytesUsed(String containerName, long used) { + ContainerStatus status = containerMap.get(containerName); + return status.incrBytesUsed(used); + } + + /** + * Decrease the bytes used by the container. + * + * @param containerName - Name of the container. + * @param used - additional bytes reclaimed by the container. + * @return the current bytes used. + */ + @Override + public long decrBytesUsed(String containerName, long used) { + ContainerStatus status = containerMap.get(containerName); + return status.decrBytesUsed(used); + } + + public long getBytesUsed(String containerName) { + ContainerStatus status = containerMap.get(containerName); + return status.getBytesUsed(); + } + + /** + * Get the number of keys in the container. + * + * @param containerName - Name of the container. + * @return the current key count. + */ + @Override + public long getNumKeys(String containerName) { + ContainerStatus status = containerMap.get(containerName); + return status.getNumKeys(); } + + /** + * Get the container report state to send via heartbeat to the SCM. + * + * @return container report state. 
+ */ + @Override + public ReportState getContainerReportState() { + return containerReportManager.getContainerReportState(); + } + +} diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java index a300767706..6c83c66a3a 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java @@ -18,18 +18,19 @@ package org.apache.hadoop.ozone.container.common.impl; import org.apache.commons.lang3.RandomUtils; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmHeartbeatInterval; - import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerReportManager; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerReportManager; import org.apache.hadoop.util.Time; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; + /** * Class wraps the container report operations on datanode. * // TODO: support incremental/delta container report diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java index 5577323a10..5577323a10 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStatus.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java index 07a3a539e7..7293895295 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerStorageLocation.java @@ -32,10 +32,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.io.IOException; import java.io.FileNotFoundException; -import java.io.OutputStreamWriter; import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; import java.net.URI; import java.nio.charset.StandardCharsets; import java.nio.file.Paths; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java index a1690b5a6d..1c6e39ccdf 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java @@ -21,12 +21,15 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Type; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; @@ -37,8 +40,6 @@ import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,10 +48,14 @@ import java.security.NoSuchAlgorithmException; import java.util.LinkedList; import java.util.List; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Result.CLOSED_CONTAINER_IO; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Result.GET_SMALL_FILE_ERROR; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .CLOSED_CONTAINER_IO; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .GET_SMALL_FILE_ERROR; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .NO_SUCH_ALGORITHM; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .PUT_SMALL_FILE_ERROR; /** * Ozone Container dispatcher takes a call from the netty server and routes it diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java index c195ad22a0..cf6bf12214 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java @@ -20,16 +20,17 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + 
.StorageContainerException; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; import org.apache.hadoop.ozone.container.common.interfaces.KeyManager; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; @@ -41,8 +42,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos - .Result.NO_SUCH_KEY; +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result + .NO_SUCH_KEY; /** * Key Manager impl. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java index 0123ab1b44..3e267d2b37 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java @@ -17,18 +17,19 @@ */ package org.apache.hadoop.ozone.container.common.impl; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; - +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerDeletionChoosingPolicy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.Preconditions; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; /** * Randomly choosing containers for block deletion. 
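The surrounding hunks are part of one mechanical rename rather than functional changes. As a quick reference, a few representative before/after import mappings drawn from this patch (illustrative, not an exhaustive list):

// Old (hdsl / scm) package                                  New (hdds) package
// org.apache.hadoop.hdsl.conf.OzoneConfiguration         -> org.apache.hadoop.hdds.conf.OzoneConfiguration
// org.apache.hadoop.hdsl.protocol.DatanodeDetails        -> org.apache.hadoop.hdds.protocol.DatanodeDetails
// org.apache.hadoop.hdsl.protocol.proto.HdslProtos       -> org.apache.hadoop.hdds.protocol.proto.HddsProtos
// org.apache.hadoop.scm.container.common.helpers.Pipeline -> org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline
// org.apache.hadoop.ozone.scm.HdslServerUtil             -> org.apache.hadoop.hdds.scm.HddsServerUtil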
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java index 7ef91a91f7..7ef91a91f7 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java index 3f4cdaa2c5..0169a96cf9 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java @@ -17,20 +17,21 @@ */ package org.apache.hadoop.ozone.container.common.impl; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.ozone.container.common.helpers.ContainerData; +import org.apache.hadoop.ozone.container.common.interfaces + .ContainerDeletionChoosingPolicy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.Collections; import java.util.Comparator; import java.util.LinkedList; import java.util.List; import java.util.Map; -import org.apache.hadoop.ozone.container.common.helpers.ContainerData; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.base.Preconditions; - /** * TopN Ordered choosing policy that chooses containers based on pending * deletion blocks' number. 
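Both deletion-choosing policies touched above implement the ContainerDeletionChoosingPolicy interface. For orientation, a minimal sketch of a custom policy written against the renamed hdds packages; the method signature is inferred from the call site in ContainerManagerImpl earlier in this patch, and the class name is purely illustrative:

import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerStatus;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;

/** Illustrative only: picks the first {@code count} candidates it encounters. */
public class FirstNContainerDeletionChoosingPolicy
    implements ContainerDeletionChoosingPolicy {

  @Override
  public List<ContainerData> chooseContainerForBlockDeletion(int count,
      Map<String, ContainerStatus> candidateContainers)
      throws StorageContainerException {
    List<ContainerData> result = new LinkedList<>();
    for (ContainerStatus status : candidateContainers.values()) {
      if (result.size() >= count) {
        break;
      }
      // ContainerStatus wraps the ContainerData plus its runtime counters.
      result.add(status.getContainer());
    }
    return result;
  }
}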
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java index 16da5d9dee..16da5d9dee 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java index a7d3fe536d..f55d74ca2b 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ChunkManager.java @@ -18,10 +18,11 @@ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; /** * Chunk Manager allows read, write, delete and listing of chunks in diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java index 3e0a28331e..f7280e2a3c 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java @@ -17,12 +17,13 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; -import java.util.List; -import java.util.Map; - +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerStatus; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; + +import java.util.List; +import java.util.Map; /** * This interface is used for choosing desired containers for diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java index cb3fdeadd6..984fe41b91 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java @@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; -import 
org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandResponseProto; /** * Dispatcher acts as the bridge between the transport layer and diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java index 9c5fcea163..9c5fcea163 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManager.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java index 88e6148630..88e6148630 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerLocationManagerMXBean.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java index 3f19992b73..2ff636e87f 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java @@ -21,17 +21,20 @@ package org.apache.hadoop.ozone.container.common.interfaces; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.util.RwLock; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; - import java.io.IOException; import 
java.security.NoSuchAlgorithmException; import java.util.List; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java index 6485d3f46a..4689dfe9e1 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; /** * Interface for container report manager operations. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java index a613d2aca0..8c27ba94c4 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.KeyData; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import java.io.IOException; import java.util.List; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java index d83bf95c36..d83bf95c36 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java index 1638a36a13..1638a36a13 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 91fa9c3758..8e9482f565 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -19,19 +19,20 @@ package 
org.apache.hadoop.ozone.container.common.statemachine; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler .CloseContainerHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CommandDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ContainerReportHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler + .CommandDispatcher; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler + .ContainerReportHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler + .DeleteBlocksCommandHandler; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; - -import static org.apache.hadoop.ozone.scm.HdslServerUtil.getScmHeartbeatInterval; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,6 +42,8 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; + /** * State Machine Class. */ @@ -220,7 +223,7 @@ public class DatanodeStateMachine implements Closeable { private final int value; /** - * Constructs ContainerStates. + * Constructs states. * * @param value Enum Value */ diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index 61bc91e318..7e85923d31 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -20,10 +20,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.protocol.VersionResponse; import org.apache.hadoop.ozone.protocolPB .StorageContainerDatanodeProtocolClientSideTranslatorPB; - -import static org.apache.hadoop.ozone.scm.HdslServerUtil.getLogWarnInterval; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmHeartbeatInterval; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -35,6 +31,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import static org.apache.hadoop.hdds.scm.HddsServerUtil.getLogWarnInterval; +import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval; + /** * Endpoint is used as holder class that keeps state around the RPC endpoint. 
*/ diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java index 4f64bde0b3..4f64bde0b3 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachineMBean.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java index c9f83c61c5..19722f04a5 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java @@ -26,9 +26,6 @@ import org.apache.hadoop.ozone.protocolPB .StorageContainerDatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.security.UserGroupInformation; - -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmRpcTimeOutInMilliseconds; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,10 +33,18 @@ import javax.management.ObjectName; import java.io.Closeable; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.hadoop.hdds.scm.HddsServerUtil + .getScmRpcTimeOutInMilliseconds; + /** * SCMConnectionManager - Acts as a class that manages the membership * information of the SCMs that we are working with. 
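The static helpers that moved in this hunk (formerly on HdslServerUtil) are small configuration accessors. A rough usage sketch, assuming they keep the Configuration parameter and long return values implied by their call sites in this patch:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval;
import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmRpcTimeOutInMilliseconds;

public final class ScmTimingSettings {
  private ScmTimingSettings() {
  }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Interval at which the datanode state machine heartbeats to the SCM.
    long heartbeatInterval = getScmHeartbeatInterval(conf);
    // Upper bound for RPC calls made through the SCMConnectionManager endpoints.
    long rpcTimeoutMillis = getScmRpcTimeOutInMilliseconds(conf);
    System.out.println("heartbeat=" + heartbeatInterval + " rpcTimeoutMs=" + rpcTimeoutMillis);
  }
}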
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java index 25ef16379a..25ef16379a 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManagerMXBean.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index e4d6cd997c..55476fd41e 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -17,12 +17,16 @@ package org.apache.hadoop.ozone.container.common.statemachine; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode + .InitDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode + .RunningDatanodeState; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,10 +40,10 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT; -import static org.apache.hadoop.hdsl.protocol.proto +import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState.states .noContainerReports; +import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT; /** * Current Context of State Machine. 
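The StateContext hunk below statically imports the noContainerReports state from the renamed proto package. A sketch, under the assumption that ReportState is a standard protobuf message with state and count fields, of how a default report state can be built:

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;

public final class DefaultReportState {
  private DefaultReportState() {
  }

  // Builds the "nothing to report" state a datanode can send until its containers change.
  public static ReportState noContainerReportsYet() {
    return ReportState.newBuilder()
        .setState(ReportState.states.noContainerReports)
        .setCount(0)
        .build();
  }
}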
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java index d5df699293..ac95b2a12c 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java @@ -22,39 +22,40 @@ import com.google.common.collect.Lists; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.container.common.helpers + .StorageContainerException; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BackgroundService; -import org.apache.hadoop.utils.BackgroundTaskResult; -import org.apache.hadoop.utils.BackgroundTaskQueue; import org.apache.hadoop.utils.BackgroundTask; +import org.apache.hadoop.utils.BackgroundTaskQueue; +import org.apache.hadoop.utils.BackgroundTaskResult; import org.apache.hadoop.utils.BatchOperation; -import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; +import org.apache.hadoop.utils.MetadataStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; -import java.util.List; import java.util.LinkedList; +import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; /** * A per-datanode container block deleting service takes in charge diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java index a9e202e35e..a9e202e35e 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/package-info.java diff --git 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java index e872555a36..f7b49b7590 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java @@ -16,15 +16,15 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.ozone.container.common.statemachine .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java index fee3e1c2a8..40feca32bd 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java index b54923e58e..13d9f7295d 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandHandler.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; +import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java index e9f4b61ce8..ba6b4185df 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java @@ -16,13 +16,17 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.ozone.container.common.statemachine + .EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine + .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index ff38cdccea..f106e3d55f 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -16,24 +16,31 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto + .DeleteBlockTransactionResult; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; -import 
org.apache.hadoop.ozone.container.common.helpers.DeletedContainerBlocksSummary; +import org.apache.hadoop.ozone.container.common.helpers + .DeletedContainerBlocksSummary; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine + .EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine + .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataStore; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java index 1e9c8dc5ee..1e9c8dc5ee 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java index feb2f812ac..feb2f812ac 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java index 75142afd10..75142afd10 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/DatanodeState.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java index 08f47a26e5..ac245d511c 100644 --- 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java @@ -18,15 +18,15 @@ package org.apache.hadoop.ozone.container.common.states.datanode; import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.HdslUtils; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine + .DatanodeStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine + .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.states.DatanodeState; - -import org.apache.hadoop.scm.ScmConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,7 +41,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdsl.HdslUtils.getSCMAddresses; +import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; /** * Init Datanode State is the task that gets run when we are in Init State. @@ -106,7 +106,7 @@ public class InitDatanodeState implements DatanodeState, * Persist DatanodeDetails to datanode.id file. */ private void persistContainerDatanodeDetails() throws IOException { - String dataNodeIDPath = HdslUtils.getDatanodeIdFilePath(conf); + String dataNodeIDPath = HddsUtils.getDatanodeIdFilePath(conf); File idPath = new File(dataNodeIDPath); DatanodeDetails datanodeDetails = this.context.getParent() .getDatanodeDetails(); diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java index 7a8c17b8c8..7a8c17b8c8 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java index 6b8d16c6d3..6b8d16c6d3 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index 29f1f9c2da..5dee10f44b 100644 --- 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -20,8 +20,12 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.apache.hadoop.ozone.container.common.helpers .DeletedContainerBlocksSummary; import org.apache.hadoop.ozone.container.common.statemachine @@ -32,10 +36,6 @@ import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index bfe6a2885b..6913896e40 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -18,12 +18,11 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import org.apache.hadoop.ozone.container.common.statemachine .EndpointStateMachine; - -import org.apache.hadoop.scm.ScmConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index 502d82700b..b048ee5b5c 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -17,9 +17,11 @@ package 
org.apache.hadoop.ozone.container.common.states.endpoint; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; +import org.apache.hadoop.ozone.container.common.statemachine + .EndpointStateMachine; import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import java.io.IOException; import java.util.concurrent.Callable; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java index 112259834d..112259834d 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java index 92c953ff41..92c953ff41 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java index bd180eff83..50e45b45bf 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java @@ -27,10 +27,10 @@ import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.handler.logging.LogLevel; import io.netty.handler.logging.LoggingHandler; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,8 +94,8 @@ public final class XceiverServer implements XceiverServerSpi { * @return enum -- {Stand_Alone, Ratis, Chained} */ @Override - public HdslProtos.ReplicationType getServerType() { - return HdslProtos.ReplicationType.STAND_ALONE; + public HddsProtos.ReplicationType getServerType() { + return HddsProtos.ReplicationType.STAND_ALONE; } @Override diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java index 6c42c84e12..5947dde75d 100644 --- 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerHandler.java @@ -20,11 +20,13 @@ package org.apache.hadoop.ozone.container.common.transport.server; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; -import org.slf4j.LoggerFactory; -import org.slf4j.Logger; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandResponseProto; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Netty server handlers that respond to Network events. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java index 036a654953..78ba26b4de 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerInitializer.java @@ -26,8 +26,9 @@ import io.netty.handler.codec.protobuf.ProtobufDecoder; import io.netty.handler.codec.protobuf.ProtobufEncoder; import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder; import io.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandRequestProto; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; /** * Creates a channel for the XceiverServer. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java index 09bd6fcdab..dad9e9f3fd 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerSpi.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.transport.server; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; @@ -38,6 +38,6 @@ public interface XceiverServerSpi { * Returns the Replication type supported by this end-point. 
* @return enum -- {Stand_Alone, Ratis, Chained} */ - HdslProtos.ReplicationType getServerType(); + HddsProtos.ReplicationType getServerType(); } diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java index 59c96f1349..59c96f1349 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 08a96149c6..1a89e44bd1 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -19,33 +19,36 @@ package org.apache.hadoop.ozone.container.common.transport.server.ratis; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.WriteChunkRequestProto; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos + .WriteChunkRequestProto; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.server.storage.RaftStorage; +import org.apache.ratis.shaded.com.google.protobuf.ByteString; import org.apache.ratis.shaded.com.google.protobuf.ShadedProtoUtil; import org.apache.ratis.shaded.proto.RaftProtos.LogEntryProto; import org.apache.ratis.shaded.proto.RaftProtos.SMLogEntryProto; -import org.apache.ratis.statemachine.impl.BaseStateMachine; -import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; import org.apache.ratis.statemachine.StateMachineStorage; import org.apache.ratis.statemachine.TransactionContext; +import org.apache.ratis.statemachine.impl.BaseStateMachine; +import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; import org.apache.ratis.statemachine.impl.TransactionContextImpl; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.ratis.shaded.com.google.protobuf.ByteString; -import com.google.protobuf.InvalidProtocolBufferException; import java.io.IOException; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.ConcurrentHashMap; +import 
java.util.concurrent.ThreadPoolExecutor; /** A {@link org.apache.ratis.statemachine.StateMachine} for containers. * @@ -54,7 +57,7 @@ import java.util.concurrent.ConcurrentHashMap; * requests. * * Read only requests are classified in - * {@link org.apache.hadoop.scm.XceiverClientRatis#isReadOnly} + * {@link org.apache.hadoop.hdds.scm.XceiverClientRatis#isReadOnly} * and these readonly requests are replied from the {@link #query(Message)}. * * The write requests can be divided into requests with user data diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 7aee5bb404..4bd55f1b99 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -21,18 +21,17 @@ package org.apache.hadoop.ozone.container.common.transport.server.ratis; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.transport.server .XceiverServerSpi; - -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; import org.apache.ratis.RaftConfigKeys; +import org.apache.ratis.RatisHelper; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.RatisHelper; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; @@ -209,7 +208,7 @@ public final class XceiverServerRatis implements XceiverServerSpi { * @return enum -- {Stand_Alone, Ratis, Chained} */ @Override - public HdslProtos.ReplicationType getServerType() { - return HdslProtos.ReplicationType.RATIS; + public HddsProtos.ReplicationType getServerType() { + return HddsProtos.ReplicationType.RATIS; } } diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java index 8debfe0283..8debfe0283 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index 6ae45b6d08..6ae45b6d08 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java index 08264f084a..08264f084a 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 0ef9406f45..33a5971bbb 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -20,7 +20,14 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl; @@ -31,15 +38,13 @@ import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; import org.apache.hadoop.ozone.container.common.interfaces.KeyManager; -import org.apache.hadoop.ozone.container.common.statemachine.background.BlockDeletingService; +import org.apache.hadoop.ozone.container.common.statemachine.background + .BlockDeletingService; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; - -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; +import org.apache.hadoop.ozone.container.common.transport.server + .XceiverServerSpi; +import org.apache.hadoop.ozone.container.common.transport.server.ratis + .XceiverServerRatis; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,14 +55,16 @@ import java.util.List; import java.util.concurrent.TimeUnit; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; /** * Ozone main class sets up the network server and initializes the container @@ -209,7 +216,7 @@ public class OzoneContainer { return this.manager.getNodeReport(); } - private int getPortbyType(HdslProtos.ReplicationType replicationType) { + private int getPortbyType(HddsProtos.ReplicationType replicationType) { for (XceiverServerSpi serverinstance : server) { if (serverinstance.getServerType() == replicationType) { return serverinstance.getIPCPort(); @@ -224,7 +231,7 @@ public class OzoneContainer { * @return Container server IPC port. */ public int getContainerServerPort() { - return getPortbyType(HdslProtos.ReplicationType.STAND_ALONE); + return getPortbyType(HddsProtos.ReplicationType.STAND_ALONE); } /** @@ -233,7 +240,7 @@ public class OzoneContainer { * @return Ratis port. */ public int getRatisContainerServerPort() { - return getPortbyType(HdslProtos.ReplicationType.RATIS); + return getPortbyType(HddsProtos.ReplicationType.RATIS); } /** diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java index c99c038b24..c99c038b24 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java index 1a51012839..1a51012839 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java index fbb8426557..43e7412aea 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java @@ -17,17 +17,29 @@ package org.apache.hadoop.ozone.protocol; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; -import 
org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos + .ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; + import java.io.IOException; /** diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java index fffbfd1d4f..1fc7c570ef 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java @@ -18,14 +18,14 @@ package org.apache.hadoop.ozone.protocol; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdsl.protocol.proto +import 
org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; +import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import java.util.List; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java index c2dc2b3bcb..83acf5bd6e 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.protocol; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import java.util.HashMap; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java index f9571e9bfc..b1cdbc4913 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java @@ -18,11 +18,12 @@ package org.apache.hadoop.ozone.protocol.commands; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCloseContainerCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCmdType; -import static org.apache.hadoop.hdsl.protocol.proto + +import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand; /** diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java index c5816581d4..a11ca25a30 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/DeleteBlocksCommand.java @@ -17,9 +17,12 @@ */ package org.apache.hadoop.ozone.protocol.commands; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMDeleteBlocksCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMDeleteBlocksCmdResponseProto; import java.util.List; diff --git 
a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java index c4db6f7ef9..88b59110f4 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/RegisteredCommand.java @@ -18,13 +18,13 @@ package org.apache.hadoop.ozone.protocol.commands; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto .ErrorCode; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCmdType; /** * Response to Datanode Register call. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java index 0c55a90ca7..c167d59ddc 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java @@ -17,12 +17,13 @@ */ package org.apache.hadoop.ozone.protocol.commands; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCmdType; -import static org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto; -import static org.apache.hadoop.hdsl.protocol.proto + +import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand; +import static org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto; /** * Informs a datanode to register itself with SCM again. 
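The command classes touched here (CloseContainerCommand, DeleteBlocksCommand, RegisteredCommand, ReregisterCommand, and the SCMCommand/SendContainerCommand classes that follow) only move their protobuf imports from org.apache.hadoop.hdsl.protocol.proto to org.apache.hadoop.hdds.protocol.proto; the types themselves are unchanged. A minimal, hypothetical sketch of what calling code looks like under the renamed packages, using only enum values that appear verbatim in the nearby hunks (closeContainerCommand, reregisterCommand); the helper class itself is illustrative and not part of this commit:

import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.SCMCmdType;

import static org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.SCMCmdType.closeContainerCommand;
import static org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.SCMCmdType.reregisterCommand;

/** Hypothetical helper showing the renamed HDDS command-type enum in use. */
public final class CmdTypeSketch {
  private CmdTypeSketch() { }

  /** True for the two command types referenced by the command classes above. */
  public static boolean isLifecycleCommand(SCMCmdType type) {
    return type == closeContainerCommand || type == reregisterCommand;
  }
}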
diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java index e79a157a9c..73e4194d8c 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.ozone.protocol.commands; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCmdType; import com.google.protobuf.GeneratedMessage; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; /** * A class that acts as the base class to convert between Java and SCM diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java index fa2245b4ad..84317526e7 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SendContainerCommand.java @@ -18,10 +18,10 @@ package org.apache.hadoop.ozone.protocol.commands; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SendContainerReportProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SendContainerReportProto; /** * Allows a Datanode to send in the container report. diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java index 7083c1b154..7083c1b154 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/package-info.java diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java index 03854a0593..a718fa7476 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/package-info.java @@ -19,5 +19,5 @@ package org.apache.hadoop.ozone.protocol; /** - * This package contains classes for HDSL protocol definitions. + * This package contains classes for HDDS protocol definitions. 
*/ diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index 4abd8a6596..12fed1cd0f 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -18,29 +18,36 @@ package org.apache.hadoop.ozone.protocolPB; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; -import org.apache.hadoop.ipc.ProtobufHelper; -import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos + .ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMRegisterRequestProto; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; +import org.apache.hadoop.ipc.ProtobufHelper; +import 
org.apache.hadoop.ipc.ProtocolTranslator; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java index 677a364afe..9b28b5ae98 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java @@ -16,8 +16,10 @@ */ package org.apache.hadoop.ozone.protocolPB; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos + .StorageContainerDatanodeProtocolService; import org.apache.hadoop.ipc.ProtocolInfo; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService; /** * Protocol used from a datanode to StorageContainerManager. This extends diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java index cd2fb59c22..985b75acd6 100644 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java @@ -18,14 +18,22 @@ package org.apache.hadoop.ozone.protocolPB; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos + .ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto; -import 
org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; import java.io.IOException; diff --git a/hadoop-hdsl/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index 187ecda25c..cd415e2bca 100644 --- a/hadoop-hdsl/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -22,7 +22,7 @@ * for what changes are allowed for a *unstable* .proto interface. */ -option java_package = "org.apache.hadoop.hdsl.protocol.proto"; +option java_package = "org.apache.hadoop.hdds.protocol.proto"; option java_outer_classname = "StorageContainerDatanodeProtocolProtos"; @@ -30,9 +30,9 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; -package hadoop.hdsl; +package hadoop.hdds; -import "hdsl.proto"; +import "hdds.proto"; /** @@ -71,7 +71,7 @@ all container info in memory all the time. */ message ContainerPersistanceProto { required DatanodeContainerState state = 1; - required hadoop.hdsl.Pipeline pipeline = 2; + required hadoop.hdds.Pipeline pipeline = 2; required ContainerInfo info = 3; } @@ -98,7 +98,7 @@ message ContainerInfo { optional int64 readBytes = 8; optional int64 writeBytes = 9; required int64 containerID = 10; - optional hadoop.hdsl.LifeCycleState state = 11; + optional hadoop.hdds.LifeCycleState state = 11; } // The deleted blocks which are stored in deletedBlock.db of scm. @@ -161,7 +161,7 @@ message SCMVersionRequestProto { */ message SCMVersionResponseProto { required uint32 softwareVersion = 1; - repeated hadoop.hdsl.KeyValue keys = 2; + repeated hadoop.hdds.KeyValue keys = 2; } message SCMNodeAddressList { diff --git a/hadoop-hdsl/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider b/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider index 2e103fea7b..2e103fea7b 100644 --- a/hadoop-hdsl/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider +++ b/hadoop-hdds/container-service/src/main/resources/META-INF/services/com.sun.jersey.spi.container.ContainerProvider diff --git a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 4501940a1b..9db792bc7b 100644 --- a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -17,8 +17,6 @@ package org.apache.hadoop.ozone.container.common; -import java.net.InetSocketAddress; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -31,6 +29,8 @@ import org.apache.hadoop.ozone.protocolPB import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.security.UserGroupInformation; +import java.net.InetSocketAddress; + /** * Helper utility to test containers. 
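The StorageContainerDatanodeProtocol.proto hunk above changes only the generated java_package, the proto package (hadoop.hdsl to hadoop.hdds), and the hdds.proto import; the message definitions keep their fields. As a rough illustration of what the regenerated classes look like to callers, a sketch that is not part of the patch: the field names (softwareVersion, keys) come from the message shown above, the key/value strings are made up, and the builder methods are the standard protobuf-generated ones assumed to exist for those fields:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
import org.apache.hadoop.hdds.protocol.proto
    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;

/** Hypothetical example of building the renamed generated protobuf types. */
public final class VersionResponseSketch {
  private VersionResponseSketch() { }

  public static SCMVersionResponseProto sample() {
    // softwareVersion and keys are the fields declared in
    // StorageContainerDatanodeProtocol.proto (see the hunk above).
    return SCMVersionResponseProto.newBuilder()
        .setSoftwareVersion(1)
        .addKeys(KeyValue.newBuilder()
            .setKey("hdds.example.key")    // illustrative key
            .setValue("example"))          // illustrative value
        .build();
  }
}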
*/ diff --git a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 32c28aa12d..b63c5fbe9b 100644 --- a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -16,15 +16,11 @@ */ package org.apache.hadoop.ozone.container.common; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.ServerSocket; - +import com.google.protobuf.BlockingService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos .StorageContainerDatanodeProtocolService; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -34,7 +30,10 @@ import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.ozone.protocolPB .StorageContainerDatanodeProtocolServerSideTranslatorPB; -import com.google.protobuf.BlockingService; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; /** * Test Endpoint class. diff --git a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index 4349b1aa25..41a8a8012c 100644 --- a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -17,19 +17,28 @@ package org.apache.hadoop.ozone.container.common; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.scm.VersionInfo; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos + .ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos; -import 
org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto; -import org.apache.hadoop.ozone.scm.VersionInfo; import java.io.IOException; import java.util.HashMap; diff --git a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index f05ba4941a..9446ce275a 100644 --- a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -19,18 +19,24 @@ package org.apache.hadoop.ozone.container.common; import com.google.common.collect.Maps; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine + .DatanodeStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine + .EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine + .SCMConnectionManager; import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.ozone.container.common.states.datanode + .InitDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode + .RunningDatanodeState; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.junit.After; @@ -53,9 +59,9 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys 
.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; import static org.junit.Assert.assertTrue; /** diff --git a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java index 61bd65f0d9..86888aa790 100644 --- a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java @@ -20,11 +20,12 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.ozone.container.common.statemachine.background.BlockDeletingService; +import org.apache.hadoop.ozone.container.common.statemachine.background + .BlockDeletingService; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; /** diff --git a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java index 4e8a90bf1d..4e8a90bf1d 100644 --- a/hadoop-hdsl/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java diff --git a/hadoop-hdsl/framework/README.md b/hadoop-hdds/framework/README.md index 1f4d217556..59cdac71f3 100644 --- a/hadoop-hdsl/framework/README.md +++ b/hadoop-hdds/framework/README.md @@ -15,9 +15,9 @@ limitations under the License. --> -# Server framework for HDSL/Ozone +# Server framework for HDDS/Ozone -This project contains generic utilities and resources for all the HDSL/Ozone +This project contains generic utilities and resources for all the HDDS/Ozone server-side components. 
The project is shared between the server/service projects but not with the diff --git a/hadoop-hdsl/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 44ef7fedf6..a234b8e32d 100644 --- a/hadoop-hdsl/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -19,26 +19,24 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl</artifactId> + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> </parent> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop HDSL Common utilities for server side - components - </description> - <name>Apache Hadoop HDSL Server Common</name> + <description>Apache HDDS server framework</description> + <name>Apache HDDS Server Common</name> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> <dependencies> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> </dependencies> diff --git a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java index f9c3991886..90de00273c 100644 --- a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/BaseHttpServer.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.hdsl.server; +package org.apache.hadoop.hdds.server; import com.google.common.base.Optional; import org.apache.hadoop.conf.Configuration; @@ -32,8 +32,8 @@ import javax.servlet.http.HttpServlet; import java.io.IOException; import java.net.InetSocketAddress; -import static org.apache.hadoop.hdsl.HdslUtils.getHostNameFromConfigKeys; -import static org.apache.hadoop.hdsl.HdslUtils.getPortNumberFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; /** * Base class for HTTP server of the Ozone related components. diff --git a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java index f315ecb11e..2cf5b351a9 100644 --- a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServerUtils.java @@ -15,23 +15,22 @@ * the License. */ -package org.apache.hadoop.hdsl.server; - -import java.io.File; -import java.net.InetSocketAddress; +package org.apache.hadoop.hdds.server; +import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OzoneConfigKeys; - -import com.google.common.base.Preconditions; import org.apache.http.client.methods.HttpRequestBase; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; +import java.net.InetSocketAddress; + /** - * Generic utilities for all HDSL/Ozone servers. 
+ * Generic utilities for all HDDS/Ozone servers. */ public class ServerUtils { diff --git a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java index f9c57ea72d..bcd75f3f21 100644 --- a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfo.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdsl.server; +package org.apache.hadoop.hdds.server; /** * Common runtime information for any service components. diff --git a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java index 92f00b1c5b..36d6b64a68 100644 --- a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/ServiceRuntimeInfoImpl.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdsl.server; +package org.apache.hadoop.hdds.server; import org.apache.hadoop.util.VersionInfo; diff --git a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java index b38215ccbf..35ad5e7f49 100644 --- a/hadoop-hdsl/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdsl/server/package-info.java @@ -16,8 +16,8 @@ * limitations under the License. */ -package org.apache.hadoop.hdsl.server; +package org.apache.hadoop.hdds.server; /** - * Common server side utilities for all the hdsl/ozone server components. + * Common server side utilities for all the hdds/ozone server components. */
\ No newline at end of file diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/datanode/dn.js b/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js index 3b671671de..3b671671de 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/datanode/dn.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/datanode/dn.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-1.6.4.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js index c4bf158ece..c4bf158ece 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-1.6.4.min.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.6.4.min.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js index 4aced5761e..4aced5761e 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js index 3f985d1422..3f985d1422 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/angular-route-1.6.4.min.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/d3-3.5.17.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js index 166487309a..166487309a 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/d3-3.5.17.min.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/d3-3.5.17.min.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/dfs-dust.js b/hadoop-hdds/framework/src/main/resources/webapps/static/dfs-dust.js index c7af6a18d3..c7af6a18d3 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/dfs-dust.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/dfs-dust.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css index b8a5c0f1a3..b8a5c0f1a3 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map index 63380e6e0d..63380e6e0d 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js index 9cfd702277..9cfd702277 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map index 594da5a35c..594da5a35c 100644 --- 
a/hadoop-hdsl/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.css b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css index 271ac74446..271ac74446 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.css +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.css diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.js b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js index 37cafef4b4..37cafef4b4 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/ozone.js +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/ozone.js diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/config.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html index 7ca07134cb..7ca07134cb 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/config.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/config.html diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/jvm.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html index c1f7d16aef..c1f7d16aef 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/jvm.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/jvm.html diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/menu.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html index 95f1b4842f..95f1b4842f 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/menu.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/menu.html diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/overview.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html index 30e2d26f56..30e2d26f56 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/overview.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/overview.html diff --git a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/rpc-metrics.html b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html index facb152030..facb152030 100644 --- a/hadoop-hdsl/framework/src/main/resources/webapps/static/templates/rpc-metrics.html +++ b/hadoop-hdds/framework/src/main/resources/webapps/static/templates/rpc-metrics.html diff --git a/hadoop-hdsl/framework/src/test/java/org/apache/hadoop/hdsl/server/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdsl/server/TestBaseHttpServer.java index fcd5f7fbf7..c6eae0e5fa 100644 --- a/hadoop-hdsl/framework/src/test/java/org/apache/hadoop/hdsl/server/TestBaseHttpServer.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdsl/server/TestBaseHttpServer.java @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdsl.server; +package org.apache.hadoop.hdds.server; import org.apache.hadoop.conf.Configuration; - import org.junit.Assert; import org.junit.Test; /** - * Test Common ozone/hdsl web methods. + * Test Common ozone/hdds web methods. 
*/ public class TestBaseHttpServer { @Test diff --git a/hadoop-hdsl/framework/src/test/resources/ozone-site.xml b/hadoop-hdds/framework/src/test/resources/ozone-site.xml index 77dd7ef994..77dd7ef994 100644 --- a/hadoop-hdsl/framework/src/test/resources/ozone-site.xml +++ b/hadoop-hdds/framework/src/test/resources/ozone-site.xml diff --git a/hadoop-hdsl/pom.xml b/hadoop-hdds/pom.xml index 0a40fbb262..01b696cec0 100644 --- a/hadoop-hdsl/pom.xml +++ b/hadoop-hdds/pom.xml @@ -23,10 +23,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <version>3.2.0-SNAPSHOT</version> <relativePath>../hadoop-project-dist</relativePath> </parent> - <artifactId>hadoop-hdsl</artifactId> + + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop Hdsl Parent project</description> - <name>Apache Hadoop Hdsl</name> + <description>Apache Hadoop Distributed Data Store Parent project</description> + <name>Apache Hdds</name> <packaging>pom</packaging> <modules> diff --git a/hadoop-hdsl/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index e7b8bf0e22..35975f4e49 100644 --- a/hadoop-hdsl/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -19,48 +19,48 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl</artifactId> + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> </parent> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop HDSL SCM server</description> - <name>Apache Hadoop HDSL SCM server</name> + <description>Apache HDDS SCM server</description> + <name>Apache Hadoop HDDS SCM server</name> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> <dependencies> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-client</artifactId> + <artifactId>hadoop-hdds-client</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <scope>test</scope> <type>test-jar</type> </dependency> @@ -141,7 +141,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactItems> <artifactItem> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <outputDirectory>${project.build.directory}/</outputDirectory> <includes>webapps/static/**/*.*</includes> </artifactItem> diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/SCMMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java index 3b31d9f70c..17b681443b 100644 --- 
a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/SCMMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMMXBean.java @@ -16,12 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm; - -import java.util.Map; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.server.ServiceRuntimeInfo; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; + +import java.util.Map; /** * diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/SCMStorage.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java index 4a6d4c06a2..27e9363233 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/SCMStorage.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMStorage.java @@ -15,19 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; +import org.apache.hadoop.ozone.common.Storage; import java.io.IOException; import java.util.Properties; import java.util.UUID; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; - +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR; -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; /** * SCMStorage is responsible for management of the StorageDirectories used by diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java index ef925ca849..1a78deef41 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManager.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -26,18 +26,79 @@ import com.google.common.cache.RemovalNotification; import com.google.protobuf.BlockingService; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.scm.block.BlockManager; +import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.Mapping; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.HdslUtils; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto + .DeleteBlockTransactionResult; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos + .ContainerBlocksDeletionACKResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeAddressList; +import 
org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SendContainerReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.hdsl.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.OzoneConfigKeys; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.common.Storage.StorageState; @@ -47,64 +108,16 @@ import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeAddressList; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMReregisterCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SendContainerReportProto; -import 
org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; -import org.apache.hadoop.ozone.protocolPB.ScmBlockLocationProtocolServerSideTranslatorPB; +import org.apache.hadoop.ozone.protocolPB + .ScmBlockLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerLocationProtocolServerSideTranslatorPB; -import org.apache.hadoop.ozone.scm.block.BlockManager; -import org.apache.hadoop.ozone.scm.block.BlockManagerImpl; -import org.apache.hadoop.ozone.scm.container.ContainerMapping; -import org.apache.hadoop.ozone.scm.container.Mapping; -import org.apache.hadoop.ozone.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.node.SCMNodeManager; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.DeleteBlockResult; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes; +import org.apache.hadoop.ozone.protocolPB + .StorageContainerDatanodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.ozone.protocolPB + .StorageContainerLocationProtocolServerSideTranslatorPB; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.StringUtils; - -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmBlockClientBindAddress; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmClientBindAddress; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmDataNodeBindAddress; -import static org.apache.hadoop.hdsl.server.ServerUtils - .updateRPCListenAddress; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -127,19 +140,24 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.apache.hadoop.hdsl.protocol.proto - .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; -import 
static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_HANDLER_COUNT_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdds.protocol.proto + .ScmBlockLocationProtocolProtos.DeleteScmBlockResult.Result; +import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED; import static org.apache.hadoop.util.ExitUtil.terminate; /** @@ -281,7 +299,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl new StorageContainerDatanodeProtocolServerSideTranslatorPB(this)); final InetSocketAddress datanodeRpcAddr = - getScmDataNodeBindAddress(conf); + HddsServerUtil.getScmDataNodeBindAddress(conf); datanodeRpcServer = startRpcServer(conf, datanodeRpcAddr, StorageContainerDatanodeProtocolPB.class, dnProtoPbService, handlerCount); @@ -296,7 +314,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl new StorageContainerLocationProtocolServerSideTranslatorPB(this)); final InetSocketAddress scmAddress = - getScmClientBindAddress(conf); + HddsServerUtil.getScmClientBindAddress(conf); clientRpcServer = startRpcServer(conf, scmAddress, StorageContainerLocationProtocolPB.class, storageProtoPbService, handlerCount); @@ -311,7 +329,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl new ScmBlockLocationProtocolServerSideTranslatorPB(this)); final InetSocketAddress scmBlockAddress = - getScmBlockClientBindAddress(conf); + HddsServerUtil.getScmBlockClientBindAddress(conf); blockRpcServer = startRpcServer(conf, scmBlockAddress, ScmBlockLocationProtocolPB.class, blockProtoPbService, handlerCount); @@ -447,7 +465,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl public static StorageContainerManager createSCM(String[] argv, OzoneConfiguration conf) throws IOException { - if (!HdslUtils.isHdslEnabled(conf)) { + if (!HddsUtils.isHddsEnabled(conf)) { System.err.println("SCM cannot be started in secure mode or when " + OZONE_ENABLED + " is set to false"); System.exit(1); @@ -659,19 +677,19 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl * @param poolName @return List of Datanodes. 
*/ @Override - public HdslProtos.NodePool queryNode(EnumSet<NodeState> nodeStatuses, - HdslProtos.QueryScope queryScope, String poolName) throws IOException { + public HddsProtos.NodePool queryNode(EnumSet<NodeState> nodeStatuses, + HddsProtos.QueryScope queryScope, String poolName) throws IOException { - if (queryScope == HdslProtos.QueryScope.POOL) { + if (queryScope == HddsProtos.QueryScope.POOL) { throw new IllegalArgumentException("Not Supported yet"); } List<DatanodeDetails> datanodes = queryNode(nodeStatuses); - HdslProtos.NodePool.Builder poolBuilder = - HdslProtos.NodePool.newBuilder(); + HddsProtos.NodePool.Builder poolBuilder = + HddsProtos.NodePool.newBuilder(); for (DatanodeDetails datanode : datanodes) { - HdslProtos.Node node = HdslProtos.Node.newBuilder() + HddsProtos.Node node = HddsProtos.Node.newBuilder() .setNodeID(datanode.getProtoBufMessage()) .addAllNodeStates(nodeStatuses) .build(); @@ -701,18 +719,18 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl if (op == ObjectStageChangeRequestProto.Op.create) { if (stage == ObjectStageChangeRequestProto.Stage.begin) { scmContainerManager.updateContainerState(name, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); } else { scmContainerManager.updateContainerState(name, - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); } } else if (op == ObjectStageChangeRequestProto.Op.close) { if (stage == ObjectStageChangeRequestProto.Stage.begin) { scmContainerManager.updateContainerState(name, - HdslProtos.LifeCycleEvent.FINALIZE); + HddsProtos.LifeCycleEvent.FINALIZE); } else { scmContainerManager.updateContainerState(name, - HdslProtos.LifeCycleEvent.CLOSE); + HddsProtos.LifeCycleEvent.CLOSE); } } } //else if (type == ObjectStageChangeRequestProto.Type.pipeline) { @@ -725,9 +743,9 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl */ @Override public Pipeline createReplicationPipeline( - HdslProtos.ReplicationType replicationType, - HdslProtos.ReplicationFactor factor, - HdslProtos.NodePool nodePool) + HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor factor, + HddsProtos.NodePool nodePool) throws IOException { // TODO: will be addressed in future patch. 
return null; @@ -803,8 +821,8 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl * @throws IOException */ @Override - public Pipeline allocateContainer(HdslProtos.ReplicationType replicationType, - HdslProtos.ReplicationFactor replicationFactor, String containerName, + public Pipeline allocateContainer(HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, String containerName, String owner) throws IOException { checkAdminAccess(); @@ -1129,7 +1147,7 @@ public class StorageContainerManager extends ServiceRuntimeInfoImpl */ @Override public AllocatedBlock allocateBlock(long size, - HdslProtos.ReplicationType type, HdslProtos.ReplicationFactor factor, + HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, String owner) throws IOException { return scmBlockManager.allocateBlock(size, type, factor, owner); } diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManagerHttpServer.java index 0dcf364f3d..1ca059ca4c 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/StorageContainerManagerHttpServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/StorageContainerManagerHttpServer.java @@ -15,12 +15,11 @@ * the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.server.BaseHttpServer; +import org.apache.hadoop.hdds.server.BaseHttpServer; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.scm.ScmConfigKeys; import java.io.IOException; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java index 6d964f0f99..4ab251641c 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java @@ -15,11 +15,11 @@ * the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.Closeable; import java.io.IOException; @@ -39,8 +39,8 @@ public interface BlockManager extends Closeable { * @return AllocatedBlock * @throws IOException */ - AllocatedBlock allocateBlock(long size, HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor, String owner) throws IOException; + AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, String owner) throws IOException; /** * Give the key to the block, get the pipeline info. 
diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 36f73cc615..d9661124f5 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -14,29 +14,27 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.Mapping; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.ozone.scm.container.Mapping; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; - -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +51,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .CHILL_MODE_EXCEPTION; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_FIND_BLOCK; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .INVALID_BLOCK_SIZE; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys @@ -62,12 +67,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes - .CHILL_MODE_EXCEPTION; -import static 
org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_FIND_BLOCK; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes - .INVALID_BLOCK_SIZE; /** Block Manager manages the block access for SCM. */ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { @@ -265,12 +264,12 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { containerManager .getStateManager() .getMatchingContainer( - size, owner, type, factor, HdslProtos.LifeCycleState + size, owner, type, factor, HddsProtos.LifeCycleState .ALLOCATED); if (containerInfo != null) { containerManager.updateContainerState(containerInfo.getContainerName(), - HdslProtos.LifeCycleEvent.CREATE); - return newBlock(containerInfo, HdslProtos.LifeCycleState.ALLOCATED); + HddsProtos.LifeCycleEvent.CREATE); + return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED); } // Since we found no allocated containers that match our criteria, let us @@ -278,10 +277,10 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { containerInfo = containerManager .getStateManager() - .getMatchingContainer(size, owner, type, factor, HdslProtos + .getMatchingContainer(size, owner, type, factor, HddsProtos .LifeCycleState.OPEN); if (containerInfo != null) { - return newBlock(containerInfo, HdslProtos.LifeCycleState.OPEN); + return newBlock(containerInfo, HddsProtos.LifeCycleState.OPEN); } // We found neither ALLOCATED or OPEN Containers. This generally means @@ -295,12 +294,12 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { containerManager .getStateManager() .getMatchingContainer( - size, owner, type, factor, HdslProtos.LifeCycleState + size, owner, type, factor, HddsProtos.LifeCycleState .ALLOCATED); if (containerInfo != null) { containerManager.updateContainerState(containerInfo.getContainerName(), - HdslProtos.LifeCycleEvent.CREATE); - return newBlock(containerInfo, HdslProtos.LifeCycleState.ALLOCATED); + HddsProtos.LifeCycleEvent.CREATE); + return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED); } // we have tried all strategies we know and but somehow we are not able @@ -325,12 +324,12 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { * @return AllocatedBlock */ private AllocatedBlock newBlock( - ContainerInfo containerInfo, HdslProtos.LifeCycleState state) + ContainerInfo containerInfo, HddsProtos.LifeCycleState state) throws IOException { // TODO : Replace this with Block ID. String blockKey = UUID.randomUUID().toString(); - boolean createContainer = (state == HdslProtos.LifeCycleState.ALLOCATED); + boolean createContainer = (state == HddsProtos.LifeCycleState.ALLOCATED); AllocatedBlock.Builder abb = new AllocatedBlock.Builder() @@ -521,7 +520,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { // sense. // We have to get open containers by Replication Type and Replication // factor. Hence returning 0 for now. 
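The allocateBlock hunks above are a representative instance of the rename: the SCM block-allocation path keeps its logic (try an ALLOCATED container first, then an OPEN one, then provision more) and only the owning packages and the protobuf outer class move from hdsl/HdslProtos to hdds/HddsProtos. A minimal caller sketch, assuming the post-rename packages shown in this patch; the class name and the RATIS/THREE replication constants are illustrative assumptions, not taken from this diff:

// Hypothetical caller of the renamed BlockManager interface; only the
// package names and the allocateBlock signature come from this patch.
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.block.BlockManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;

import java.io.IOException;

public class AllocateBlockSketch {
  static AllocatedBlock allocate(BlockManager blockManager, long size,
      String owner) throws IOException {
    // ReplicationType.RATIS and ReplicationFactor.THREE are assumed enum
    // constants; any valid HddsProtos replication values would do here.
    return blockManager.allocateBlock(size,
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        owner);
  }
}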
- // containers.get(HdslProtos.LifeCycleState.OPEN).size(); + // containers.get(HddsProtos.LifeCycleState.OPEN).size(); } @Override diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/BlockmanagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java index efcfc63734..23c6983083 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/BlockmanagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockmanagerMXBean.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; /** diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index 9a208d5824..47074d28ec 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -14,7 +14,14 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; + +import com.google.common.collect.ArrayListMultimap; +import org.apache.hadoop.hdds.scm.container.Mapping; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import java.io.IOException; import java.util.Collections; @@ -23,13 +30,6 @@ import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.ozone.scm.container.Mapping; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; - -import com.google.common.collect.ArrayListMultimap; - /** * A wrapper class to hold info about datanode and all deleted block * transactions that will be sent to this datanode. diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index a34d9605b8..f7b770eca0 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -15,9 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import java.io.Closeable; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 77c59b04c4..0f4988afa3 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -15,21 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.primitives.Longs; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.utils.BatchOperation; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; - -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,11 +42,16 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_BLOCK_DB; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; /** * A implement class of {@link DeletedBlockLog}, and it uses diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index 5d7c2d5719..2c555e0421 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -14,18 +14,18 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.Mapping; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.ozone.scm.container.Mapping; -import org.apache.hadoop.ozone.scm.node.NodeManager; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.BackgroundTask; @@ -34,18 +34,20 @@ import org.apache.hadoop.utils.BackgroundTaskResult.EmptyTaskResult; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; - import java.io.IOException; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; + /** * A background service running in SCM to delete blocks. This service scans * block deletion log in certain interval and caches block deletion commands - * in {@link org.apache.hadoop.ozone.scm.node.CommandQueue}, asynchronously + * in {@link org.apache.hadoop.hdds.scm.node.CommandQueue}, asynchronously * SCM HB thread polls cached commands and sends them to datanode for physical * processing. */ diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java index 3c56627761..e1bfdff506 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/block/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/package-info.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; /** * This package contains routines to manage the block location and * mapping inside SCM diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index 364bf7d93a..63cb3a3c1f 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -14,36 +14,34 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lease.Lease; import org.apache.hadoop.ozone.lease.LeaseException; import org.apache.hadoop.ozone.lease.LeaseManager; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.container.closer.ContainerCloser; -import org.apache.hadoop.ozone.scm.container.replication.ContainerSupervisor; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.pipelines.PipelineSelector; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; - -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,12 +55,14 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_GB; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_CHANGE_CONTAINER_STATE; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; /** * Mapping class contains the mapping from a name to a pipeline mapping. 
This @@ -162,7 +162,7 @@ public class ContainerMapping implements Mapping { SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); } - HdslProtos.SCMContainerInfo temp = HdslProtos.SCMContainerInfo.PARSER + HddsProtos.SCMContainerInfo temp = HddsProtos.SCMContainerInfo.PARSER .parseFrom(containerBytes); containerInfo = ContainerInfo.fromProtobuf(temp); return containerInfo; @@ -194,7 +194,7 @@ public class ContainerMapping implements Mapping { for (Map.Entry<byte[], byte[]> entry : range) { ContainerInfo containerInfo = ContainerInfo.fromProtobuf( - HdslProtos.SCMContainerInfo.PARSER.parseFrom( + HddsProtos.SCMContainerInfo.PARSER.parseFrom( entry.getValue())); Preconditions.checkNotNull(containerInfo); containerList.add(containerInfo); @@ -283,8 +283,8 @@ public class ContainerMapping implements Mapping { * {@inheritDoc} Used by client to update container state on SCM. */ @Override - public HdslProtos.LifeCycleState updateContainerState( - String containerName, HdslProtos.LifeCycleEvent event) throws + public HddsProtos.LifeCycleState updateContainerState( + String containerName, HddsProtos.LifeCycleEvent event) throws IOException { ContainerInfo containerInfo; lock.lock(); @@ -299,7 +299,7 @@ public class ContainerMapping implements Mapping { SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); } containerInfo = - ContainerInfo.fromProtobuf(HdslProtos.SCMContainerInfo.PARSER + ContainerInfo.fromProtobuf(HddsProtos.SCMContainerInfo.PARSER .parseFrom(containerBytes)); Preconditions.checkNotNull(containerInfo); @@ -311,7 +311,7 @@ public class ContainerMapping implements Mapping { // Register callback to be executed in case of timeout containerLease.registerCallBack(() -> { updateContainerState(containerName, - HdslProtos.LifeCycleEvent.TIMEOUT); + HddsProtos.LifeCycleEvent.TIMEOUT); return null; }); break; @@ -393,10 +393,10 @@ public class ContainerMapping implements Mapping { try { byte[] containerBytes = containerStore.get(dbKey); if (containerBytes != null) { - HdslProtos.SCMContainerInfo knownState = - HdslProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); + HddsProtos.SCMContainerInfo knownState = + HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); - HdslProtos.SCMContainerInfo newState = + HddsProtos.SCMContainerInfo newState = reconcileState(datanodeState, knownState); // FIX ME: This can be optimized, we write twice to memory, where a @@ -431,11 +431,11 @@ public class ContainerMapping implements Mapping { * @param knownState - State inside SCM. * @return new SCM State for this container. */ - private HdslProtos.SCMContainerInfo reconcileState( + private HddsProtos.SCMContainerInfo reconcileState( StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState, - HdslProtos.SCMContainerInfo knownState) { - HdslProtos.SCMContainerInfo.Builder builder = - HdslProtos.SCMContainerInfo.newBuilder(); + HddsProtos.SCMContainerInfo knownState) { + HddsProtos.SCMContainerInfo.Builder builder = + HddsProtos.SCMContainerInfo.newBuilder(); builder.setContainerName(knownState.getContainerName()); builder.setPipeline(knownState.getPipeline()); // If used size is greater than allocated size, we will be updating @@ -468,7 +468,7 @@ public class ContainerMapping implements Mapping { * @param newState - This is the state we maintain in SCM. 
* @throws IOException */ - private boolean closeContainerIfNeeded(HdslProtos.SCMContainerInfo newState) + private boolean closeContainerIfNeeded(HddsProtos.SCMContainerInfo newState) throws IOException { float containerUsedPercentage = 1.0f * newState.getUsedBytes() / this.size; @@ -487,10 +487,10 @@ public class ContainerMapping implements Mapping { // container to reach. We will know that a container has reached the // closed state from container reports. This state change should be // invoked once and only once. - HdslProtos.LifeCycleState state = updateContainerState( + HddsProtos.LifeCycleState state = updateContainerState( scmInfo.getContainerName(), - HdslProtos.LifeCycleEvent.FINALIZE); - if (state != HdslProtos.LifeCycleState.CLOSING) { + HddsProtos.LifeCycleEvent.FINALIZE); + if (state != HddsProtos.LifeCycleState.CLOSING) { LOG.error("Failed to close container {}, reason : Not able " + "to " + "update container state, current container state: {}.", @@ -511,11 +511,11 @@ public class ContainerMapping implements Mapping { * @return true if is in open state, false otherwise */ private boolean shouldClose(ContainerInfo info) { - return info.getState() == HdslProtos.LifeCycleState.OPEN; + return info.getState() == HddsProtos.LifeCycleState.OPEN; } private boolean isClosed(ContainerInfo info) { - return info.getState() == HdslProtos.LifeCycleState.CLOSED; + return info.getState() == HddsProtos.LifeCycleState.CLOSED; } @VisibleForTesting @@ -572,8 +572,8 @@ public class ContainerMapping implements Mapping { // return info of a deleted container. may revisit this in the future, // for now, just skip a not-found container if (containerBytes != null) { - HdslProtos.SCMContainerInfo oldInfoProto = - HdslProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); + HddsProtos.SCMContainerInfo oldInfoProto = + HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); ContainerInfo oldInfo = ContainerInfo.fromProtobuf(oldInfoProto); ContainerInfo newInfo = new ContainerInfo.Builder() .setAllocatedBytes(info.getAllocatedBytes()) diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index b9f0e4c8d4..227eca04e8 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -15,26 +15,26 @@ * the License. 
*/ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.states.ContainerState; +import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; +import org.apache.hadoop.ozone.common.statemachine + .InvalidStateTransitionException; import org.apache.hadoop.ozone.common.statemachine.StateMachine; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleEvent; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerID; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerState; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerStateMap; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.pipelines.PipelineSelector; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,7 +49,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .FAILED_TO_CHANGE_CONTAINER_STATE; /** @@ -113,8 +113,8 @@ public class ContainerStateManager implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(ContainerStateManager.class); - private final StateMachine<HdslProtos.LifeCycleState, - HdslProtos.LifeCycleEvent> stateMachine; + private final StateMachine<HddsProtos.LifeCycleState, + HddsProtos.LifeCycleEvent> stateMachine; private final long containerSize; private final ConcurrentHashMap<ContainerState, ContainerID> lastUsedMap; @@ -132,7 +132,7 @@ public class ContainerStateManager implements Closeable { Mapping containerMapping) { // Initialize the container state machine. - Set<HdslProtos.LifeCycleState> finalStates = new HashSet(); + Set<HddsProtos.LifeCycleState> finalStates = new HashSet(); // These are the steady states of a container. finalStates.add(LifeCycleState.OPEN); @@ -284,8 +284,8 @@ public class ContainerStateManager implements Closeable { * @return Container Info. 
* @throws IOException on Failure. */ - public ContainerInfo allocateContainer(PipelineSelector selector, HdslProtos - .ReplicationType type, HdslProtos.ReplicationFactor replicationFactor, + public ContainerInfo allocateContainer(PipelineSelector selector, HddsProtos + .ReplicationType type, HddsProtos.ReplicationFactor replicationFactor, final String containerName, String owner) throws IOException { @@ -298,7 +298,7 @@ public class ContainerStateManager implements Closeable { ContainerInfo containerInfo = new ContainerInfo.Builder() .setContainerName(containerName) - .setState(HdslProtos.LifeCycleState.ALLOCATED) + .setState(HddsProtos.LifeCycleState.ALLOCATED) .setPipeline(pipeline) // This is bytes allocated for blocks inside container, not the // container size @@ -324,7 +324,7 @@ public class ContainerStateManager implements Closeable { * @throws SCMException on Failure. */ public ContainerInfo updateContainerState(ContainerInfo - info, HdslProtos.LifeCycleEvent event) throws SCMException { + info, HddsProtos.LifeCycleEvent event) throws SCMException { LifeCycleState newState; try { newState = this.stateMachine.getNextState(info.getState(), event); diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java index 1ba426c2f2..c949c6c4b5 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/Mapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java @@ -14,13 +14,12 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; - -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; import java.io.Closeable; import java.io.IOException; @@ -68,8 +67,8 @@ public interface Mapping extends Closeable { * @return - Container Info. * @throws IOException */ - ContainerInfo allocateContainer(HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor replicationFactor, + ContainerInfo allocateContainer(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor replicationFactor, String containerName, String owner) throws IOException; /** @@ -87,8 +86,8 @@ public interface Mapping extends Closeable { * @return - new container state * @throws IOException */ - HdslProtos.LifeCycleState updateContainerState(String containerName, - HdslProtos.LifeCycleEvent event) throws IOException; + HddsProtos.LifeCycleState updateContainerState(String containerName, + HddsProtos.LifeCycleEvent event) throws IOException; /** * Returns the container State Manager. 
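The ContainerMapping and ContainerStateManager hunks above only move classes into org.apache.hadoop.hdds.scm.container and swap HdslProtos for HddsProtos; the lifecycle logic is unchanged: allocateContainer starts a container in ALLOCATED, updateContainerState asks a state machine for the next state, and closeContainerIfNeeded fires FINALIZE and expects the container to land in CLOSING. Below is a minimal, self-contained sketch of that transition idea, not the real StateMachine or HddsProtos API; the CREATED and CLOSE events are assumptions added only to make the toy table complete.

```java
import java.util.Collections;
import java.util.EnumMap;
import java.util.Map;

/** Illustrative sketch of the container lifecycle transitions referenced in this diff. */
public final class ContainerLifecycleSketch {

  enum State { ALLOCATED, OPEN, CLOSING, CLOSED }
  enum Event { CREATED, FINALIZE, CLOSE }

  // Transition table: current state -> (event -> next state).
  private static final Map<State, Map<Event, State>> TRANSITIONS =
      new EnumMap<>(State.class);
  static {
    TRANSITIONS.put(State.ALLOCATED, new EnumMap<>(Event.class));
    TRANSITIONS.put(State.OPEN, new EnumMap<>(Event.class));
    TRANSITIONS.put(State.CLOSING, new EnumMap<>(Event.class));
    TRANSITIONS.get(State.ALLOCATED).put(Event.CREATED, State.OPEN); // assumed creation step
    TRANSITIONS.get(State.OPEN).put(Event.FINALIZE, State.CLOSING);  // closeContainerIfNeeded path
    TRANSITIONS.get(State.CLOSING).put(Event.CLOSE, State.CLOSED);   // assumed close confirmation
  }

  static State nextState(State current, Event event) {
    State next = TRANSITIONS
        .getOrDefault(current, Collections.<Event, State>emptyMap())
        .get(event);
    if (next == null) {
      throw new IllegalStateException("Invalid transition: " + current + " on " + event);
    }
    return next;
  }

  public static void main(String[] args) {
    // Mirrors the check in closeContainerIfNeeded: FINALIZE on OPEN must yield CLOSING.
    System.out.println(nextState(State.OPEN, Event.FINALIZE)); // prints CLOSING
  }

  private ContainerLifecycleSketch() { }
}
```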
diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java index c9256955f1..b5d4da9ed1 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/closer/ContainerCloser.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java @@ -16,16 +16,16 @@ * */ -package org.apache.hadoop.ozone.scm.container.closer; +package org.apache.hadoop.hdds.scm.container.closer; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.node.NodeManager; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -91,7 +91,7 @@ public class ContainerCloser { * * @param info - ContainerInfo. */ - public void close(HdslProtos.SCMContainerInfo info) { + public void close(HddsProtos.SCMContainerInfo info) { if (commandIssued.containsKey(info.getContainerName())) { // We check if we issued a close command in last 3 * reportInterval secs. @@ -126,8 +126,8 @@ public class ContainerCloser { // this queue can be emptied by a datanode after a close report is send // to SCM. In that case also, data node will ignore this command. - HdslProtos.Pipeline pipeline = info.getPipeline(); - for (HdslProtos.DatanodeDetailsProto datanodeDetails : + HddsProtos.Pipeline pipeline = info.getPipeline(); + for (HddsProtos.DatanodeDetailsProto datanodeDetails : pipeline.getPipelineChannel().getMembersList()) { nodeManager.addDatanodeCommand( DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(), diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/closer/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java index 2d0f257b33..ee02bbd88f 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/closer/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/package-info.java @@ -20,4 +20,4 @@ * This package has class that close a container. That is move a container from * open state to close state. */ -package org.apache.hadoop.ozone.scm.container.closer;
\ No newline at end of file +package org.apache.hadoop.hdds.scm.container.closer;
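ContainerCloser keeps its throttling behaviour across the rename: close() first checks the commandIssued map and, per the comment in the hunk, skips containers for which a close command already went out within the last 3 * reportInterval, before queueing a CloseContainerCommand for every member of the pipeline channel. A rough sketch of that guard with simplified stand-in types (the class and method names below are illustrative, not the real SCM API):

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Simplified sketch of the "issue a close command at most once per window" guard. */
class CloseCommandGuard {
  private final Map<String, Long> commandIssued = new ConcurrentHashMap<>();
  private final long windowMillis;

  CloseCommandGuard(long reportIntervalMillis) {
    // The hunk's comment checks whether a close was issued in the last 3 * reportInterval.
    this.windowMillis = 3 * reportIntervalMillis;
  }

  /** Returns true if a close command should be sent for this container now. */
  boolean shouldIssue(String containerName, long nowMillis) {
    Long last = commandIssued.get(containerName);
    if (last != null && nowMillis - last < windowMillis) {
      return false; // a close command is already in flight for this container
    }
    commandIssued.put(containerName, nowMillis);
    return true;
  }
}
```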
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java index 4ac490cb6f..3f8d05681b 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/package-info.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; /** * This package contains routines to manage the container location and * mapping inside SCM diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/ContainerPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java index 1129d93f33..5d91ac5dad 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/ContainerPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicy.java @@ -15,9 +15,9 @@ * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.algorithms; +package org.apache.hadoop.hdds.scm.container.placement.algorithms; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import java.io.IOException; import java.util.List; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/SCMCommonPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java index 71e5ebbb1e..0a595d55ba 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/SCMCommonPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMCommonPolicy.java @@ -15,15 +15,15 @@ * the License. 
*/ -package org.apache.hadoop.ozone.scm.container.placement.algorithms; +package org.apache.hadoop.hdds.scm.container.placement.algorithms; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,7 +104,7 @@ public abstract class SCMCommonPolicy implements ContainerPlacementPolicy { public List<DatanodeDetails> chooseDatanodes(int nodesRequired, final long sizeRequired) throws SCMException { List<DatanodeDetails> healthyNodes = - nodeManager.getNodes(HdslProtos.NodeState.HEALTHY); + nodeManager.getNodes(HddsProtos.NodeState.HEALTHY); String msg; if (healthyNodes.size() == 0) { msg = "No healthy node found to allocate container."; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java index 90d301cfcc..85a6b544cc 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java @@ -15,14 +15,14 @@ * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.algorithms; +package org.apache.hadoop.hdds.scm.container.placement.algorithms; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java index f46976294e..9903c84e31 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java @@ -15,13 +15,13 @@ * the License. 
*/ -package org.apache.hadoop.ozone.scm.container.placement.algorithms; +package org.apache.hadoop.hdds.scm.container.placement.algorithms; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java index d6280df430..1cb810dd0e 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/algorithms/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/package-info.java @@ -14,5 +14,5 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.algorithms; +package org.apache.hadoop.hdds.scm.container.placement.algorithms; // Various placement algorithms.
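The placement classes (ContainerPlacementPolicy, SCMCommonPolicy, SCMContainerPlacementCapacity, SCMContainerPlacementRandom) change packages only: the common policy asks the NodeManager for nodes in HddsProtos.NodeState.HEALTHY and fails when none exist, and the concrete policies then pick from that list by free capacity or at random. A compressed sketch of that flow, using stand-in types rather than the real NodeManager and SCMException classes:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/** Sketch of the healthy-node filter plus random selection used by the placement policies. */
class RandomPlacementSketch {

  static final class Node {
    final String uuid;
    final long remainingBytes;

    Node(String uuid, long remainingBytes) {
      this.uuid = uuid;
      this.remainingBytes = remainingBytes;
    }
  }

  /** Pick nodesRequired healthy nodes that can hold sizeRequired bytes, chosen at random. */
  static List<Node> chooseDatanodes(List<Node> healthyNodes, int nodesRequired,
      long sizeRequired) throws IOException {
    if (healthyNodes.isEmpty()) {
      // Same failure mode as SCMCommonPolicy when no HEALTHY nodes are registered.
      throw new IOException("No healthy node found to allocate container.");
    }
    List<Node> candidates = new ArrayList<>();
    for (Node n : healthyNodes) {
      if (n.remainingBytes >= sizeRequired) {
        candidates.add(n);
      }
    }
    if (candidates.size() < nodesRequired) {
      throw new IOException("Not enough healthy nodes with free space for the request.");
    }
    Collections.shuffle(candidates); // SCMContainerPlacementRandom-style choice
    return candidates.subList(0, nodesRequired);
  }
}
```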
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/ContainerStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java index 810b8fd5dc..b8e8998763 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/ContainerStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/ContainerStat.java @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; - -import java.io.IOException; - -import org.apache.hadoop.ozone.web.utils.JsonUtils; +package org.apache.hadoop.hdds.scm.container.placement.metrics; import com.fasterxml.jackson.annotation.JsonProperty; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.web.utils.JsonUtils; + +import java.io.IOException; /** * This class represents the SCM container stat. diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/DatanodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java index cc829c2c3f..a6e732c750 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/DatanodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/DatanodeMetric.java @@ -15,9 +15,9 @@ * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; /** * DatanodeMetric acts as the basis for all the metric that is used in diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/LongMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java index dbcd9f4226..050d26bd23 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/LongMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/LongMetric.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; import com.fasterxml.jackson.annotation.JsonAutoDetect; import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/NodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java index bda61f8ccd..d6857d395c 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/NodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/NodeStat.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java index bdfec2d32a..e4dd9aa37e 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/SCMMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/SCMNodeMetric.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java index ae01361663..b50376d89d 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/SCMNodeMetric.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeMetric.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 64f76a6b0a..3c871d3ef5 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java index 51350958e1..4a81d69216 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/metrics/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/package-info.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container.placement.metrics; +package org.apache.hadoop.hdds.scm.container.placement.metrics; // Various metrics supported by Datanode and used by SCM in the placement // strategy.
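The metrics classes that the capacity-based policy consults (SCMNodeStat, SCMNodeMetric, ContainerStat and the rest) likewise only move to org.apache.hadoop.hdds.scm.container.placement.metrics. Conceptually each node stat is a capacity/used/remaining triple that can be ordered by free space; a toy version of that idea follows (field and method names are assumptions, not the real SCMNodeStat API):

```java
/** Toy stand-in for an SCM node stat: capacity, used and remaining bytes. */
class NodeStatSketch implements Comparable<NodeStatSketch> {
  final long capacity;
  final long used;

  NodeStatSketch(long capacity, long used) {
    this.capacity = capacity;
    this.used = used;
  }

  long remaining() {
    return capacity - used;
  }

  /** Order nodes so that the one with the most free space sorts first. */
  @Override
  public int compareTo(NodeStatSketch other) {
    return Long.compare(other.remaining(), this.remaining());
  }
}
```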
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java index 43676effc6..dc54d9bd91 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/placement/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/package-info.java @@ -15,5 +15,5 @@ * the License. */ -package org.apache.hadoop.ozone.scm.container.placement; +package org.apache.hadoop.hdds.scm.container.placement; // Classes related to container placement.
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java index e51ad79e93..52321eee4a 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/ContainerSupervisor.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java @@ -14,18 +14,18 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container.replication; +package org.apache.hadoop.hdds.scm.container.replication; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.slf4j.Logger; @@ -47,21 +47,21 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import static com.google.common.util.concurrent.Uninterruptibles .sleepUninterruptibly; -import static org.apache.hadoop.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT; -import static org.apache.hadoop.scm.ScmConfigKeys - .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_MAX_CONTAINER_REPORT_THREADS_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_MAX_NODEPOOL_PROCESSING_THREADS_DEFAULT; /** diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java index 5b2dd0fe78..ddbd213402 100644 --- 
a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/InProgressPool.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java @@ -14,19 +14,19 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container.replication; +package org.apache.hadoop.hdds.scm.container.replication; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; +import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,12 +43,11 @@ import java.util.stream.Collectors; import static com.google.common.util.concurrent.Uninterruptibles .sleepUninterruptibly; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos - .NodeState.HEALTHY; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos - .NodeState.STALE; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos - .NodeState.INVALID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .INVALID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; /** * These are pools that are actively checking for replication status of the diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/PeriodicPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java index 35b1e7607b..ef28aa78d0 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/PeriodicPool.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/PeriodicPool.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.scm.container.replication; +package org.apache.hadoop.hdds.scm.container.replication; import java.util.concurrent.atomic.AtomicLong; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java index 82e420272e..7bbe2efe57 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/replication/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/package-info.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.container.replication; +package org.apache.hadoop.hdds.scm.container.replication; /* This package contains routines that manage replication of a container. This relies on container reports to understand the replication level of a diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index 1372e7f0cb..288fa2deb1 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -15,10 +15,11 @@ * the License. * */ -package org.apache.hadoop.ozone.scm.container.ContainerStates; +package org.apache.hadoop.hdds.scm.container.states; import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,7 +29,7 @@ import java.util.Map; import java.util.NavigableSet; import java.util.TreeSet; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .FAILED_TO_CHANGE_CONTAINER_STATE; /** diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerState.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java index d3e4522ea4..1dac36ef77 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerState.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerState.java @@ -16,19 +16,19 @@ * */ -package org.apache.hadoop.ozone.scm.container.ContainerStates; +package org.apache.hadoop.hdds.scm.container.states; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; /** * Class that acts as the container state. */ public class ContainerState { - private final HdslProtos.ReplicationType type; + private final HddsProtos.ReplicationType type; private final String owner; - private final HdslProtos.ReplicationFactor replicationFactor; + private final HddsProtos.ReplicationFactor replicationFactor; /** * Constructs a Container Key. 
@@ -37,15 +37,15 @@ public class ContainerState { * @param type - Replication Type. * @param factor - Replication Factors */ - public ContainerState(String owner, HdslProtos.ReplicationType type, - HdslProtos.ReplicationFactor factor) { + public ContainerState(String owner, HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor) { this.type = type; this.owner = owner; this.replicationFactor = factor; } - public HdslProtos.ReplicationType getType() { + public HddsProtos.ReplicationType getType() { return type; } @@ -53,7 +53,7 @@ public class ContainerState { return owner; } - public HdslProtos.ReplicationFactor getFactor() { + public HddsProtos.ReplicationFactor getFactor() { return replicationFactor; } diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index fdbf2673ee..48c6423d02 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -16,14 +16,15 @@ * */ -package org.apache.hadoop.ozone.scm.container.ContainerStates; +package org.apache.hadoop.hdds.scm.container.states; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.util.AutoCloseableLock; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,11 +35,11 @@ import java.util.Map; import java.util.NavigableSet; import java.util.TreeSet; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .CONTAINER_EXISTS; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .FAILED_TO_CHANGE_CONTAINER_STATE; -import static org.apache.hadoop.ozone.scm.exceptions.SCMException.ResultCodes +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .FAILED_TO_FIND_CONTAINER; /** diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java index 6a7e663171..cf20f39690 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerStates/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/package-info.java @@ -19,4 +19,4 @@ /** * Container States management package. */ -package org.apache.hadoop.ozone.scm.container.ContainerStates;
\ No newline at end of file +package org.apache.hadoop.hdds.scm.container.states;
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/exceptions/SCMException.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java index 4fb50e7623..227df3c9e9 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/exceptions/SCMException.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.exceptions; +package org.apache.hadoop.hdds.scm.exceptions; import java.io.IOException; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/exceptions/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java index 0922382d14..7b69310c23 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/exceptions/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/exceptions/package-info.java @@ -14,5 +14,5 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.exceptions; +package org.apache.hadoop.hdds.scm.exceptions; // Exceptions thrown by SCM.
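ContainerState and ContainerStateMap move from the ContainerStates package to the lower-case states package. ContainerState is a composite key of owner, ReplicationType and ReplicationFactor built with commons-lang3 EqualsBuilder and HashCodeBuilder, which lets ContainerStateMap group containers by those attributes. A hedged illustration of that key pattern; the nested enums stand in for the HddsProtos types and the hash seeds are arbitrary:

```java
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;

/** Sketch of a composite key like ContainerState: owner + replication type + factor. */
final class ContainerKeySketch {
  enum ReplicationType { RATIS, STAND_ALONE }
  enum ReplicationFactor { ONE, THREE }

  private final String owner;
  private final ReplicationType type;
  private final ReplicationFactor factor;

  ContainerKeySketch(String owner, ReplicationType type, ReplicationFactor factor) {
    this.owner = owner;
    this.type = type;
    this.factor = factor;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    ContainerKeySketch that = (ContainerKeySketch) o;
    return new EqualsBuilder()
        .append(owner, that.owner)
        .append(type, that.type)
        .append(factor, that.factor)
        .isEquals();
  }

  @Override
  public int hashCode() {
    return new HashCodeBuilder(61, 71)
        .append(owner)
        .append(type)
        .append(factor)
        .toHashCode();
  }
}
```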
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java index c376efa564..edbcfa12f2 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/CommandQueue.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/HeartbeatQueueItem.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java index fe7ff14421..43720f0104 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/HeartbeatQueueItem.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java @@ -16,12 +16,13 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; import static org.apache.hadoop.util.Time.monotonicNow; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index c1b2acaf5a..4392633b16 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -15,16 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeStat; import java.io.Closeable; import java.util.List; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java index 46881c9c60..3ac993b77d 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/NodeManagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManagerMXBean.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/NodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java index c330526786..46faf9ca4d 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/NodePoolManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodePoolManager.java @@ -16,11 +16,10 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; - -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 31915b1807..0174c1754f 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -15,50 +15,42 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.scm.HddsServerUtil; +import org.apache.hadoop.hdds.scm.StorageContainerManager; +import org.apache.hadoop.hdds.scm.VersionInfo; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto + .ErrorCode; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMStorageReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol; import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; +import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto - .ErrorCode; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.hdsl.protocol - .proto.StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol - .proto.StorageContainerDatanodeProtocolProtos.SCMStorageReport; -import org.apache.hadoop.ozone.scm.StorageContainerManager; -import org.apache.hadoop.ozone.scm.VersionInfo; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.concurrent.HadoopExecutors; - -import static org.apache.hadoop.ozone.scm.HdslServerUtil.getDeadNodeInterval; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getMaxHBToProcessPerLoop; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmHeartbeatInterval; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmheartbeatCheckerInterval; -import static 
org.apache.hadoop.ozone.scm.HdslServerUtil.getStaleNodeInterval; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -79,10 +71,12 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.DEAD; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.STALE; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.INVALID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .INVALID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; import static org.apache.hadoop.util.Time.monotonicNow; /** @@ -188,12 +182,12 @@ public class SCMNodeManager // TODO: Support this value as a Percentage of known machines. chillModeNodeCount = 1; - staleNodeIntervalMs = getStaleNodeInterval(conf); - deadNodeIntervalMs = getDeadNodeInterval(conf); + staleNodeIntervalMs = HddsServerUtil.getStaleNodeInterval(conf); + deadNodeIntervalMs = HddsServerUtil.getDeadNodeInterval(conf); heartbeatCheckerIntervalMs = - getScmheartbeatCheckerInterval(conf); - datanodeHBIntervalSeconds = getScmHeartbeatInterval(conf); - maxHBToProcessPerLoop = getMaxHBToProcessPerLoop(conf); + HddsServerUtil.getScmheartbeatCheckerInterval(conf); + datanodeHBIntervalSeconds = HddsServerUtil.getScmHeartbeatInterval(conf); + maxHBToProcessPerLoop = HddsServerUtil.getMaxHBToProcessPerLoop(conf); executorService = HadoopExecutors.newScheduledThreadPool(1, new ThreadFactoryBuilder().setDaemon(true) diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java index f9b193fe09..a4a6c51cdf 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodePoolManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java @@ -16,19 +16,17 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; - -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,15 +41,16 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_FIND_NODE_IN_POOL; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_LOAD_NODEPOOL; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB; -import static org.apache.hadoop.ozone.scm - .exceptions.SCMException.ResultCodes.FAILED_TO_LOAD_NODEPOOL; -import static org.apache.hadoop.ozone.scm - .exceptions.SCMException.ResultCodes.FAILED_TO_FIND_NODE_IN_POOL; -import static org.apache.hadoop.scm - .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.scm - .ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; /** * SCM node pool manager that manges node pools. @@ -106,7 +105,7 @@ public final class SCMNodePoolManager implements NodePoolManager { nodePoolStore.iterate(null, (key, value) -> { try { DatanodeDetails nodeId = DatanodeDetails.getFromProtoBuf( - HdslProtos.DatanodeDetailsProto.PARSER.parseFrom(key)); + HddsProtos.DatanodeDetailsProto.PARSER.parseFrom(key)); String poolName = DFSUtil.bytes2String(value); Set<DatanodeDetails> nodePool = null; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java index f1efe79910..d6a8ad0394 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/node/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/package-info.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; /** * The node package deals with node management. diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java index 7686df3610..4669e741ef 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/package-info.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; /* * This package contains StorageContainerManager classes. diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java index 35e1bc103a..8e43528914 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java @@ -14,15 +14,14 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.pipelines; +package org.apache.hadoop.hdds.scm.pipelines; - -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java index 0eac7f6e24..f0c9eea441 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/PipelineSelector.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java @@ -14,24 +14,26 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.scm.pipelines; +package org.apache.hadoop.hdds.scm.pipelines; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .SCMContainerPlacementRandom; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipelines.ratis.RatisManagerImpl; +import org.apache.hadoop.hdds.scm.pipelines.standalone.StandaloneManagerImpl; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementRandom; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.pipelines.ratis.RatisManagerImpl; -import org.apache.hadoop.ozone.scm.pipelines.standalone.StandaloneManagerImpl; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -164,7 +166,7 @@ public class PipelineSelector { */ public Pipeline getReplicationPipeline(ReplicationType replicationType, - HdslProtos.ReplicationFactor replicationFactor, String containerName) + HddsProtos.ReplicationFactor replicationFactor, String containerName) throws IOException { PipelineManager manager = getPipelineManager(replicationType); Preconditions.checkNotNull(manager, "Found invalid pipeline manager"); diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java index c2a3b5490e..ea24c581a8 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.pipelines; +package org.apache.hadoop.hdds.scm.pipelines; /** Ozone supports the notion of different kind of pipelines. 
That means that we can have a replication pipeline build on diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java index c98573e353..089a1374a4 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/ratis/RatisManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java @@ -14,23 +14,23 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.pipelines.ratis; +package org.apache.hadoop.hdds.scm.pipelines.ratis; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.ozone.scm.container.placement.algorithms +import org.apache.hadoop.hdds.scm.XceiverClientRatis; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.placement.algorithms .ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.pipelines.PipelineManager; -import org.apache.hadoop.ozone.scm.pipelines.PipelineSelector; -import org.apache.hadoop.scm.XceiverClientRatis; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipelines.PipelineManager; +import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java new file mode 100644 index 0000000000..2970fb354b --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/package-info.java @@ -0,0 +1,18 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.pipelines.ratis;
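The PipelineSelector hunk a little above shows getReplicationPipeline taking the renamed HddsProtos enums. A hedged usage sketch: only the method signature and enum names come from the diff; how a PipelineSelector instance is obtained is not shown here, so it is passed in as a parameter, and the class name is invented for illustration.

// Sketch only: asking the selector for a Ratis pipeline after the rename.
import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;

public final class PipelineLookupSketch {
  public static Pipeline ratisPipelineFor(PipelineSelector selector,
      String containerName) throws IOException {
    return selector.getReplicationPipeline(
        HddsProtos.ReplicationType.RATIS,
        HddsProtos.ReplicationFactor.THREE,
        containerName);
  }
}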
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/standalone/StandaloneManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java index 023baea815..8268329351 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/standalone/StandaloneManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/StandaloneManagerImpl.java @@ -14,28 +14,29 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.pipelines.standalone; +package org.apache.hadoop.hdds.scm.pipelines.standalone; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.pipelines.PipelineManager; -import org.apache.hadoop.ozone.scm.pipelines.PipelineSelector; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.pipelines.PipelineManager; +import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.List; -import java.util.UUID; -import java.util.Set; import java.util.HashSet; import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.UUID; /** * Standalone Manager Impl to prove that pluggable interface diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java index 6fe9b2821b..b2c3ca40e5 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/ratis/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/standalone/package-info.java @@ -15,4 +15,4 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.pipelines.ratis;
\ No newline at end of file +package org.apache.hadoop.hdds.scm.pipelines.standalone;
\ No newline at end of file diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java index 27fd32b851..4944017593 100644 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/ratis/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.ratis; +package org.apache.hadoop.hdds.scm.ratis; /** * This package contains classes related to Apache Ratis for SCM. diff --git a/hadoop-hdsl/server-scm/src/main/webapps/scm/index.html b/hadoop-hdds/server-scm/src/main/webapps/scm/index.html index 3407f51be7..3407f51be7 100644 --- a/hadoop-hdsl/server-scm/src/main/webapps/scm/index.html +++ b/hadoop-hdds/server-scm/src/main/webapps/scm/index.html diff --git a/hadoop-hdsl/server-scm/src/main/webapps/scm/main.html b/hadoop-hdds/server-scm/src/main/webapps/scm/main.html index 2666f81b5f..2666f81b5f 100644 --- a/hadoop-hdsl/server-scm/src/main/webapps/scm/main.html +++ b/hadoop-hdds/server-scm/src/main/webapps/scm/main.html diff --git a/hadoop-hdsl/server-scm/src/main/webapps/scm/scm-overview.html b/hadoop-hdds/server-scm/src/main/webapps/scm/scm-overview.html index fca23baa04..fca23baa04 100644 --- a/hadoop-hdsl/server-scm/src/main/webapps/scm/scm-overview.html +++ b/hadoop-hdds/server-scm/src/main/webapps/scm/scm-overview.html diff --git a/hadoop-hdsl/server-scm/src/main/webapps/scm/scm.js b/hadoop-hdds/server-scm/src/main/webapps/scm/scm.js index bcfa8b7b7b..bcfa8b7b7b 100644 --- a/hadoop-hdsl/server-scm/src/main/webapps/scm/scm.js +++ b/hadoop-hdds/server-scm/src/main/webapps/scm/scm.js diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/HdslServerUtilTest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java index 5be5ec1364..6e01e5354b 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/HdslServerUtilTest.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsServerUtilTest.java @@ -17,37 +17,30 @@ */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.Timeout; import java.net.InetSocketAddress; import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.scm.ScmConfigKeys; - -import static org.apache.hadoop.hdsl.HdslUtils.getSCMAddresses; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmAddressForDataNodes; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmClientBindAddress; -import static org.apache.hadoop.ozone.scm.HdslServerUtil - .getScmDataNodeBindAddress; +import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; 
/** - * Test the HDSL server side utilities. + * Test the HDDS server side utilities. */ -public class HdslServerUtilTest { +public class HddsServerUtilTest { @Rule public Timeout timeout = new Timeout(300000); @@ -63,7 +56,7 @@ public class HdslServerUtilTest { public void testMissingScmDataNodeAddress() { final Configuration conf = new OzoneConfiguration(); thrown.expect(IllegalArgumentException.class); - getScmAddressForDataNodes(conf); + HddsServerUtil.getScmAddressForDataNodes(conf); } /** @@ -78,7 +71,7 @@ public class HdslServerUtilTest { // First try a client address with just a host name. Verify it falls // back to the default port. conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = getScmAddressForDataNodes(conf); + InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf); assertThat(addr.getHostString(), is("1.2.3.4")); assertThat(addr.getPort(), is( ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); @@ -86,7 +79,7 @@ public class HdslServerUtilTest { // Next try a client address with just a host name and port. // Verify the port is ignored and the default DataNode port is used. conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); - addr = getScmAddressForDataNodes(conf); + addr = HddsServerUtil.getScmAddressForDataNodes(conf); assertThat(addr.getHostString(), is("1.2.3.4")); assertThat(addr.getPort(), is( ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); @@ -98,7 +91,7 @@ public class HdslServerUtilTest { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8"); addr = - getScmAddressForDataNodes(conf); + HddsServerUtil.getScmAddressForDataNodes(conf); assertThat(addr.getHostString(), is("5.6.7.8")); assertThat(addr.getPort(), is( ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); @@ -109,7 +102,7 @@ public class HdslServerUtilTest { // used. conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "5.6.7.8:200"); - addr = getScmAddressForDataNodes(conf); + addr = HddsServerUtil.getScmAddressForDataNodes(conf); assertThat(addr.getHostString(), is("5.6.7.8")); assertThat(addr.getPort(), is(200)); } @@ -126,7 +119,7 @@ public class HdslServerUtilTest { // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY // is set differently. conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = getScmClientBindAddress(conf); + InetSocketAddress addr = HddsServerUtil.getScmClientBindAddress(conf); assertThat(addr.getHostString(), is("0.0.0.0")); assertThat(addr.getPort(), is(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); @@ -135,7 +128,7 @@ public class HdslServerUtilTest { // should be respected. 
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - addr = getScmClientBindAddress(conf); + addr = HddsServerUtil.getScmClientBindAddress(conf); assertThat(addr.getHostString(), is("0.0.0.0")); assertThat(addr.getPort(), is(100)); @@ -145,7 +138,7 @@ public class HdslServerUtilTest { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4"); conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8"); - addr = getScmClientBindAddress(conf); + addr = HddsServerUtil.getScmClientBindAddress(conf); assertThat(addr.getHostString(), is("5.6.7.8")); assertThat(addr.getPort(), is( ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT)); @@ -156,7 +149,7 @@ public class HdslServerUtilTest { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY, "5.6.7.8"); - addr = getScmClientBindAddress(conf); + addr = HddsServerUtil.getScmClientBindAddress(conf); assertThat(addr.getHostString(), is("5.6.7.8")); assertThat(addr.getPort(), is(100)); } @@ -172,7 +165,7 @@ public class HdslServerUtilTest { // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY // is set differently. conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4"); - InetSocketAddress addr = getScmDataNodeBindAddress(conf); + InetSocketAddress addr = HddsServerUtil.getScmDataNodeBindAddress(conf); assertThat(addr.getHostString(), is("0.0.0.0")); assertThat(addr.getPort(), is( ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); @@ -182,7 +175,7 @@ public class HdslServerUtilTest { // should be respected. 
conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); - addr = getScmDataNodeBindAddress(conf); + addr = HddsServerUtil.getScmDataNodeBindAddress(conf); assertThat(addr.getHostString(), is("0.0.0.0")); assertThat(addr.getPort(), is(200)); @@ -192,7 +185,7 @@ public class HdslServerUtilTest { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8"); - addr = getScmDataNodeBindAddress(conf); + addr = HddsServerUtil.getScmDataNodeBindAddress(conf); assertThat(addr.getHostString(), is("5.6.7.8")); assertThat(addr.getPort(), is( ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT)); @@ -203,7 +196,7 @@ public class HdslServerUtilTest { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "1.2.3.4:200"); conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY, "5.6.7.8"); - addr = getScmDataNodeBindAddress(conf); + addr = HddsServerUtil.getScmDataNodeBindAddress(conf); assertThat(addr.getHostString(), is("5.6.7.8")); assertThat(addr.getPort(), is(200)); } diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java index aeb57055a8..5d9139d72c 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHttpServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; @@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpConfig.Policy; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.scm.ScmConfigKeys; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 3385fd6e4c..e191dd54a1 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -14,16 +14,16 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.UUID; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.scm.node.SCMNodeManager; - /** * Stateless helper functions to handler scm/datanode connection. 
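The HddsServerUtilTest hunks above exercise the address-fallback behaviour of the renamed utility. The same behaviour as a standalone snippet; class, method and key names are verbatim from the test, and only the package of HddsServerUtil and the sketch's class name are assumed.

// Sketch only: datanodes reuse the SCM client host but fall back to the
// default datanode port when no datanode address is configured.
import java.net.InetSocketAddress;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsServerUtil;   // package assumed
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class ScmAddressFallbackSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "1.2.3.4:100");
    InetSocketAddress addr = HddsServerUtil.getScmAddressForDataNodes(conf);
    // Per the test above: host 1.2.3.4, port OZONE_SCM_DATANODE_PORT_DEFAULT
    // (the :100 client port is ignored for the datanode endpoint).
    System.out.println(addr.getHostString() + ":" + addr.getPort());
  }
}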
*/ diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index c28f8359c8..0eff702cde 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -15,18 +15,18 @@ * the License. */ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.container.ContainerMapping; -import org.apache.hadoop.ozone.scm.container.MockNodeManager; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -55,8 +55,8 @@ public class TestBlockManager { private static BlockManagerImpl blockManager; private static File testDir; private final static long DEFAULT_BLOCK_SIZE = 128 * MB; - private static HdslProtos.ReplicationFactor factor; - private static HdslProtos.ReplicationType type; + private static HddsProtos.ReplicationFactor factor; + private static HddsProtos.ReplicationType type; private static String containerOwner = "OZONE"; @Rule @@ -79,11 +79,11 @@ public class TestBlockManager { blockManager = new BlockManagerImpl(conf, nodeManager, mapping, 128); if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){ - factor = HdslProtos.ReplicationFactor.THREE; - type = HdslProtos.ReplicationType.RATIS; + factor = HddsProtos.ReplicationFactor.THREE; + type = HddsProtos.ReplicationType.RATIS; } else { - factor = HdslProtos.ReplicationFactor.ONE; - type = HdslProtos.ReplicationType.STAND_ALONE; + factor = HddsProtos.ReplicationFactor.ONE; + type = HddsProtos.ReplicationType.STAND_ALONE; } } diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 6e883391b1..e820fa4427 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -15,22 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
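The TestBlockManager setup above picks the replication type and factor from the Ratis toggle. The same selection, wrapped in helpers for readability; the key, default and enum names are exactly those shown in the hunk, and only the wrapper class and method names are invented.

// Sketch only: choose RATIS/THREE when Ratis is enabled, STAND_ALONE/ONE otherwise.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class ReplicationChoiceSketch {

  static HddsProtos.ReplicationType typeFor(Configuration conf) {
    boolean ratis = conf.getBoolean(
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
    return ratis ? HddsProtos.ReplicationType.RATIS
                 : HddsProtos.ReplicationType.STAND_ALONE;
  }

  static HddsProtos.ReplicationFactor factorFor(Configuration conf) {
    boolean ratis = conf.getBoolean(
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
        ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
    return ratis ? HddsProtos.ReplicationFactor.THREE
                 : HddsProtos.ReplicationFactor.ONE;
  }
}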
*/ -package org.apache.hadoop.ozone.scm.block; +package org.apache.hadoop.hdds.scm.block; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.Mapping; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.ozone.scm.container.ContainerMapping; -import org.apache.hadoop.ozone.scm.container.Mapping; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.MetadataKeyFilters; import org.apache.hadoop.utils.MetadataStore; @@ -52,8 +52,9 @@ import java.util.Random; import java.util.UUID; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_BLOCK_DELETION_MAX_RETRY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; import static org.mockito.Mockito.mock; /** diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 587e60e8e9..e3473b3634 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -14,31 +14,28 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMStorageReport; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; - -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; - -import static org.apache.hadoop.ozone.scm.TestUtils.getDatanodeDetails; -import org.mockito.Mockito; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.protocol.VersionResponse; +import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.assertj.core.util.Preconditions; +import org.mockito.Mockito; import java.io.IOException; import java.util.HashMap; @@ -47,11 +44,11 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.DEAD; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState +import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState .HEALTHY; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState - .STALE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; /** * Test Helper for testing container Mapping. @@ -150,7 +147,7 @@ public class MockNodeManager implements NodeManager { * @return List of Datanodes that are Heartbeating SCM. 
*/ @Override - public List<DatanodeDetails> getNodes(HdslProtos.NodeState nodestate) { + public List<DatanodeDetails> getNodes(HddsProtos.NodeState nodestate) { if (nodestate == HEALTHY) { return healthyNodes; } @@ -173,7 +170,7 @@ public class MockNodeManager implements NodeManager { * @return int -- count */ @Override - public int getNodeCount(HdslProtos.NodeState nodestate) { + public int getNodeCount(HddsProtos.NodeState nodestate) { List<DatanodeDetails> nodes = getNodes(nodestate); if (nodes != null) { return nodes.size(); @@ -299,7 +296,7 @@ public class MockNodeManager implements NodeManager { * @return Healthy/Stale/Dead. */ @Override - public HdslProtos.NodeState getNodeState(DatanodeDetails dd) { + public HddsProtos.NodeState getNodeState(DatanodeDetails dd) { return null; } @@ -379,7 +376,7 @@ public class MockNodeManager implements NodeManager { * @return SCMHeartbeatResponseProto */ @Override - public SCMCommand register(HdslProtos.DatanodeDetailsProto datanodeDetails) { + public SCMCommand register(HddsProtos.DatanodeDetailsProto datanodeDetails) { return null; } @@ -393,7 +390,7 @@ public class MockNodeManager implements NodeManager { */ @Override public List<SCMCommand> sendHeartbeat( - HdslProtos.DatanodeDetailsProto datanodeDetails, + HddsProtos.DatanodeDetailsProto datanodeDetails, SCMNodeReport nodeReport, ReportState containerReportState) { if ((datanodeDetails != null) && (nodeReport != null) && (nodeReport .getStorageReportCount() > 0)) { @@ -421,7 +418,7 @@ public class MockNodeManager implements NodeManager { @Override public Map<String, Integer> getNodeCount() { Map<String, Integer> nodeCountMap = new HashMap<String, Integer>(); - for (HdslProtos.NodeState state : HdslProtos.NodeState.values()) { + for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) { nodeCountMap.put(state.toString(), getNodeCount(state)); } return nodeCountMap; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java index cec02de218..200a611d0f 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerMapping.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java @@ -14,26 +14,24 @@ * License for the specific language governing permissions and limitations under * the License. 
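The MockNodeManager hunk above shows the NodeManager interface now keyed on HddsProtos.NodeState, including the per-state count map it builds. A short sketch of that query pattern against any NodeManager; only the wrapper class and method names are invented.

// Sketch only: tally registered nodes per HddsProtos.NodeState.
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.node.NodeManager;

public final class NodeStateReportSketch {
  public static Map<String, Integer> countByState(NodeManager nodeManager) {
    Map<String, Integer> counts = new HashMap<>();
    for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) {
      counts.put(state.toString(), nodeManager.getNodeCount(state));
    }
    return counts;
  }
}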
*/ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerID; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; - import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -187,7 +185,7 @@ public class TestContainerMapping { containerName, containerOwner); mapping.updateContainerState(containerInfo.getContainerName(), - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); Thread.sleep(TIMEOUT + 1000); NavigableSet<ContainerID> deleteContainers = mapping.getStateManager() @@ -195,13 +193,13 @@ public class TestContainerMapping { "OZONE", xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.DELETING); + HddsProtos.LifeCycleState.DELETING); Assert.assertTrue(deleteContainers.contains(containerInfo.containerID())); thrown.expect(IOException.class); thrown.expectMessage("Lease Exception"); mapping.updateContainerState(containerInfo.getContainerName(), - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); } @Test @@ -282,7 +280,7 @@ public class TestContainerMapping { containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CLOSING); + HddsProtos.LifeCycleState.CLOSING); Assert.assertTrue( pendingCloseContainers.contains(updatedContainer.containerID())); } @@ -292,22 +290,22 @@ public class TestContainerMapping { String containerName = UUID.randomUUID().toString(); ContainerInfo info = createContainer(containerName); mapping.updateContainerState(containerName, - HdslProtos.LifeCycleEvent.FINALIZE); + HddsProtos.LifeCycleEvent.FINALIZE); NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager() .getMatchingContainerIDs( containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CLOSING); + HddsProtos.LifeCycleState.CLOSING); Assert.assertTrue(pendingCloseContainers.contains(info.containerID())); mapping.updateContainerState(containerName, - HdslProtos.LifeCycleEvent.CLOSE); + HddsProtos.LifeCycleEvent.CLOSE); NavigableSet<ContainerID> 
closeContainers = mapping.getStateManager() .getMatchingContainerIDs( containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CLOSED); + HddsProtos.LifeCycleState.CLOSED); Assert.assertTrue(closeContainers.contains(info.containerID())); } @@ -326,9 +324,9 @@ public class TestContainerMapping { containerName, containerOwner); mapping.updateContainerState(containerInfo.getContainerName(), - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); mapping.updateContainerState(containerInfo.getContainerName(), - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); return containerInfo; } diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java index 4a797b2783..2fec232421 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/closer/TestContainerCloser.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java @@ -16,22 +16,24 @@ * */ -package org.apache.hadoop.ozone.scm.container.closer; +package org.apache.hadoop.hdds.scm.container.closer; import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.TestContainerMapping; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.container.ContainerMapping; -import org.apache.hadoop.ozone.scm.container.MockNodeManager; -import org.apache.hadoop.ozone.scm.container.TestContainerMapping; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -42,11 +44,16 @@ import java.io.File; import java.io.IOException; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleEvent.CREATE; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleEvent.CREATED; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_DEFAULT; +import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_GB; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent + .CREATE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent + .CREATED; +import static org.apache.hadoop.ozone.OzoneConfigKeys + .OZONE_CONTAINER_REPORT_INTERVAL; /** * Test class for Closing Container. @@ -88,8 +95,8 @@ public class TestContainerCloser { String containerName = "container-" + RandomStringUtils.randomNumeric(5); ContainerInfo info = mapping.allocateContainer( - HdslProtos.ReplicationType.STAND_ALONE, - HdslProtos.ReplicationFactor.ONE, containerName, "ozone"); + HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, containerName, "ozone"); //Execute these state transitions so that we can close the container. mapping.updateContainerState(containerName, CREATE); @@ -136,8 +143,8 @@ public class TestContainerCloser { String containerName = "container-" + RandomStringUtils.randomNumeric(5); ContainerInfo info = mapping.allocateContainer( - HdslProtos.ReplicationType.STAND_ALONE, - HdslProtos.ReplicationFactor.ONE, containerName, "ozone"); + HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, containerName, "ozone"); //Execute these state transitions so that we can close the container. mapping.updateContainerState(containerName, CREATE); @@ -182,8 +189,8 @@ public class TestContainerCloser { for (int x = 0; x < ContainerCloser.getCleanupWaterMark() + 10; x++) { String containerName = "container-" + RandomStringUtils.randomNumeric(7); ContainerInfo info = mapping.allocateContainer( - HdslProtos.ReplicationType.STAND_ALONE, - HdslProtos.ReplicationFactor.ONE, containerName, "ozone"); + HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, containerName, "ozone"); mapping.updateContainerState(containerName, CREATE); mapping.updateContainerState(containerName, CREATED); sendContainerReport(info, 5 * GIGABYTE); diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java index 33c8f39a90..63cc9bfd78 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/container/ContainerStates/TestContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java @@ -16,9 +16,10 @@ * */ -package org.apache.hadoop.ozone.scm.container.ContainerStates; +package org.apache.hadoop.hdds.scm.container.states; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b0f47f589a..ad50d971b3 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -16,28 +16,30 @@ * limitations under the License. 
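The TestContainerCloser hunk above allocates a container and drives it through CREATE and CREATED with the renamed HddsProtos lifecycle events. A sketch of that sequence; the allocateContainer and updateContainerState calls are copied from the hunk, while the throws IOException clause and the class/method names are assumptions.

// Sketch only: allocate a STAND_ALONE container and move it to CREATED.
import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

public final class ContainerLifecycleSketch {
  public static ContainerInfo createContainer(ContainerMapping mapping,
      String containerName) throws IOException {
    ContainerInfo info = mapping.allocateContainer(
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE,
        containerName, "ozone");
    // State transitions mirror the test: CREATE, then CREATED.
    mapping.updateContainerState(containerName,
        HddsProtos.LifeCycleEvent.CREATE);
    mapping.updateContainerState(containerName,
        HddsProtos.LifeCycleEvent.CREATED);
    return info;
  }
}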
*/ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMStorageReport; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.container.ContainerMapping; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; import org.junit.Rule; @@ -50,10 +52,12 @@ import java.util.List; import java.util.UUID; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState .HEALTHY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java index 6c821d3141..de6e30c947 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java @@ -15,31 +15,29 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import com.google.common.base.Supplier; -import static java.util.concurrent.TimeUnit.*; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMStorageReport; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.PathUtils; - import org.hamcrest.CoreMatchers; -import org.junit.Assert; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -54,22 +52,24 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.DEAD; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState - .STALE; -import static org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCmdType; - -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_MAX_HB_COUNT_TO_PROCESS; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; +import static org.apache.hadoop.hdds.protocol.proto + 
.StorageContainerDatanodeProtocolProtos.SCMCmdType; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.StringStartsWith.startsWith; @@ -729,7 +729,7 @@ public class TestNodeManager { * @return true if we found the expected number. */ private boolean findNodes(NodeManager nodeManager, int count, - HdslProtos.NodeState state) { + HddsProtos.NodeState state) { return count == nodeManager.getNodeCount(state); } diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java index f12e831403..8f412dedda 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodePoolManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodePoolManager.java @@ -16,19 +16,20 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import org.apache.commons.collections.ListUtils; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.scm.ScmConfigKeys; import org.apache.hadoop.test.PathUtils; - import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java index 0349f5d774..da05c59acf 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/scm/package-info.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
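The TestNodeManager imports above pull the liveness keys from the renamed ScmConfigKeys, the same keys the SCMNodeManager hunk reads through HddsServerUtil. A hedged tuning sketch: the key constants come from the diff, but that they accept time-duration values via Configuration#setTimeDuration, and the chosen values, are assumptions for illustration only.

// Sketch only: shorten heartbeat/stale/dead intervals for fast failure detection.
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class LivenessTuningSketch {
  public static OzoneConfiguration fastFailureDetection() {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setTimeDuration(
        ScmConfigKeys.OZONE_SCM_HEARTBEAT_INTERVAL, 1, TimeUnit.SECONDS);
    conf.setTimeDuration(
        ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
    conf.setTimeDuration(
        ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS);
    return conf;
  }
}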
*/ -package org.apache.hadoop.ozone.scm; +package org.apache.hadoop.hdds.scm; /** * SCM tests */ diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 6f8cc2d915..433beb8d51 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -19,8 +19,26 @@ package org.apache.hadoop.ozone.container.common; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.VersionInfo; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMNodeReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMStorageReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; @@ -36,47 +54,26 @@ import org.apache.hadoop.ozone.container.common.states.endpoint .RegisterEndpointTask; import org.apache.hadoop.ozone.container.common.states.endpoint .VersionEndpointTask; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMStorageReport; -import org.apache.hadoop.hdsl.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.VersionInfo; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; - -import static org.apache.hadoop.ozone.container.common.ContainerTestUtils - .createEndpoint; -import static org.apache.hadoop.ozone.scm.TestUtils.getDatanodeDetails; -import static org.hamcrest.Matchers.lessThanOrEqualTo; 
import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; - import java.io.File; import java.net.InetSocketAddress; import java.util.UUID; +import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdsl.protocol.proto +import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState.states .noContainerReports; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.ozone.container.common.ContainerTestUtils + .createEndpoint; +import static org.hamcrest.Matchers.lessThanOrEqualTo; /** * Tests the endpoints. @@ -231,7 +228,7 @@ public class TestEndPoint { new RegisterEndpointTask(rpcEndPoint, conf); if (!clearDatanodeDetails) { DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(); - HdslProtos.DatanodeDetailsProto datanodeDetailsProto = + HddsProtos.DatanodeDetailsProto datanodeDetailsProto = datanodeDetails.getProtoBufMessage(); endpointTask.setDatanodeDetailsProto(datanodeDetailsProto); } @@ -318,7 +315,7 @@ public class TestEndPoint { TestUtils.getDatanodeDetails(), conf); EndpointStateMachine rpcEndPoint = createEndpoint(conf, scmAddress, rpcTimeout)) { - HdslProtos.DatanodeDetailsProto datanodeDetailsProto = + HddsProtos.DatanodeDetailsProto datanodeDetailsProto = getDatanodeDetails().getProtoBufMessage(); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT); diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java index 2947789b0f..0801c255a7 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java @@ -18,20 +18,23 @@ package org.apache.hadoop.ozone.container.placement; import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.scm.container.placement.algorithms + .SCMContainerPlacementRandom; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.scm.container.MockNodeManager; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementRandom; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; import org.junit.Assert; import org.junit.Test; import java.util.List; import java.util.Random; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .HEALTHY; import static 
org.junit.Assert.assertEquals; /** diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java index ccc23c3224..7150d1b94f 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.container.placement; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java index a35d4d4ed8..8eb07e61c2 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java @@ -17,28 +17,30 @@ package org.apache.hadoop.ozone.container.replication; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.container.testutils.ReplicationDatanodeStateManager; -import org.apache.hadoop.ozone.container.testutils.ReplicationNodeManagerMock; -import org.apache.hadoop.ozone.container.testutils.ReplicationNodePoolManagerMock; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.replication.ContainerSupervisor; +import org.apache.hadoop.hdds.scm.container.replication.InProgressPool; +import org.apache.hadoop.hdds.scm.node.CommandQueue; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.ozone.scm.container.replication.ContainerSupervisor; -import org.apache.hadoop.ozone.scm.container.replication.InProgressPool; -import org.apache.hadoop.ozone.scm.node.CommandQueue; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.ozone.container.testutils + .ReplicationDatanodeStateManager; +import org.apache.hadoop.ozone.container.testutils.ReplicationNodeManagerMock; +import org.apache.hadoop.ozone.container.testutils + .ReplicationNodePoolManagerMock; import org.apache.hadoop.test.GenericTestUtils; 
import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.slf4j.event.Level; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.slf4j.event.Level; import java.io.IOException; import java.util.HashMap; @@ -48,11 +50,12 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.HEALTHY; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CONTAINER_REPORTS_WAIT_TIMEOUT; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .HEALTHY; import static org.apache.ratis.shaded.com.google.common.util.concurrent .Uninterruptibles.sleepUninterruptibly; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java index 318c54d958..318c54d958 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/package-info.java diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java index 55b8e6ddb3..26f3514497 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java @@ -17,17 +17,20 @@ package org.apache.hadoop.ozone.container.testutils; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerInfo; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; import java.util.LinkedList; import java.util.List; import java.util.Random; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState + .HEALTHY; /** * This class manages the state of datanode diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index f8a64320fe..f2db75104f 100644 --- 
a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -17,31 +17,30 @@ package org.apache.hadoop.ozone.container.testutils; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.node.CommandQueue; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.protocol.VersionResponse; -import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMNodeReport; -import org.apache.hadoop.hdsl.protocol.proto +import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeMetric; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMNodeStat; -import org.apache.hadoop.ozone.scm.node.CommandQueue; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; -import org.apache.hadoop.ozone.scm.node.NodeManager; +import org.apache.hadoop.ozone.protocol.VersionResponse; +import org.apache.hadoop.ozone.protocol.commands.SCMCommand; +import org.mockito.Mockito; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.UUID; -import org.mockito.Mockito; - /** * A Node Manager to test replication. 
*/ @@ -284,7 +283,7 @@ public class ReplicationNodeManagerMock implements NodeManager { * @return SCMHeartbeatResponseProto */ @Override - public SCMCommand register(HdslProtos.DatanodeDetailsProto dd) { + public SCMCommand register(HddsProtos.DatanodeDetailsProto dd) { return null; } @@ -297,7 +296,7 @@ public class ReplicationNodeManagerMock implements NodeManager { * @return SCMheartbeat response list */ @Override - public List<SCMCommand> sendHeartbeat(HdslProtos.DatanodeDetailsProto dd, + public List<SCMCommand> sendHeartbeat(HddsProtos.DatanodeDetailsProto dd, SCMNodeReport nodeReport, ReportState containerReportState) { return null; } diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java index 766a882799..ffcd752e84 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodePoolManagerMock.java @@ -16,9 +16,9 @@ */ package org.apache.hadoop.ozone.container.testutils; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodePoolManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java index 4e8a90bf1d..4e8a90bf1d 100644 --- a/hadoop-hdsl/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/package-info.java diff --git a/hadoop-hdsl/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 7a5eb0a00b..b2823ef180 100644 --- a/hadoop-hdsl/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -19,29 +19,30 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl</artifactId> + <artifactId>hadoop-hdds</artifactId> <version>3.2.0-SNAPSHOT</version> </parent> - <artifactId>hadoop-hdsl-tools</artifactId> + + <artifactId>hadoop-hdds-tools</artifactId> <version>3.2.0-SNAPSHOT</version> - <description>Apache Hadoop Hdsl Tools</description> - <name>Apache Hadoop HDSL tools</name> + <description>Apache HDDS Tools</description> + <name>Apache Hadoop HDDS tools</name> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> <dependencies> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-client</artifactId> + <artifactId>hadoop-hdds-client</artifactId> <scope>provided</scope> </dependency> <dependency> diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/OzoneBaseCLI.java 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java index eeec3b6459..727c81a0d8 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/OzoneBaseCLI.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneBaseCLI.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli; +package org.apache.hadoop.hdds.scm.cli; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.Options; diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/OzoneCommandHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneCommandHandler.java index 1455c131e1..f9b8fcd635 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/OzoneCommandHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/OzoneCommandHandler.java @@ -15,10 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli; +package org.apache.hadoop.hdds.scm.cli; import org.apache.commons.cli.CommandLine; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; import java.io.IOException; import java.io.PrintStream; diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/ResultCode.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ResultCode.java index 7b8704aa7f..27df88cf82 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/ResultCode.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ResultCode.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli; +package org.apache.hadoop.hdds.scm.cli; /** * The possible result code of SCM CLI. diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java index c3a1a1d64e..34553ed371 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SCMCLI.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.cli; +package org.apache.hadoop.hdds.scm.cli; import org.apache.commons.cli.BasicParser; import org.apache.commons.cli.CommandLine; @@ -23,18 +23,20 @@ import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.cli.container.ContainerCommandHandler; +import org.apache.hadoop.hdds.scm.cli.container.CreateContainerHandler; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.protocolPB + .StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.scm.cli.container.ContainerCommandHandler; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.client.ContainerOperationClient; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ToolRunner; @@ -44,13 +46,14 @@ import java.net.InetSocketAddress; import java.net.URISyntaxException; import java.util.Arrays; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForClients; -import static org.apache.hadoop.ozone.scm.cli.ResultCode.EXECUTION_ERROR; -import static org.apache.hadoop.ozone.scm.cli.ResultCode.SUCCESS; -import static org.apache.hadoop.ozone.scm.cli.ResultCode.UNRECOGNIZED_CMD; -import static org.apache.hadoop.ozone.scm.cli.container.ContainerCommandHandler.CONTAINER_CMD; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CONTAINER_SIZE_GB; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR; +import static org.apache.hadoop.hdds.scm.cli.ResultCode.SUCCESS; +import static org.apache.hadoop.hdds.scm.cli.ResultCode.UNRECOGNIZED_CMD; /** * This class is the CLI of SCM. @@ -126,7 +129,7 @@ public class SCMCLI extends OzoneBaseCLI { * of commands, the options are added in a cascading manner, e.g.: * {@link SCMCLI} asks {@link ContainerCommandHandler} to add it's options, * which then asks it's sub command, such as - * {@link org.apache.hadoop.ozone.scm.cli.container.CreateContainerHandler} + * {@link CreateContainerHandler} * to add it's own options. 
* * We need to do this because {@link BasicParser} need to take all the options @@ -146,7 +149,7 @@ public class SCMCLI extends OzoneBaseCLI { private static void addTopLevelOptions(Options options) { Option containerOps = new Option( - CONTAINER_CMD, false, "Container related options"); + ContainerCommandHandler.CONTAINER_CMD, false, "Container related options"); options.addOption(containerOps); // TODO : add pool, node and pipeline commands. } @@ -202,7 +205,7 @@ public class SCMCLI extends OzoneBaseCLI { throws IOException, URISyntaxException { OzoneCommandHandler handler = null; try { - if (cmd.hasOption(CONTAINER_CMD)) { + if (cmd.hasOption(ContainerCommandHandler.CONTAINER_CMD)) { handler = new ContainerCommandHandler(scmClient); } diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CloseContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java index 7a52305373..ba420230fe 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CloseContainerHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli.container; +package org.apache.hadoop.hdds.scm.cli.container; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; -import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.cli.SCMCLI; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.io.IOException; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP; - /** * The handler of close container command. */ @@ -45,7 +43,7 @@ public class CloseContainerHandler extends OzoneCommandHandler { } if (!cmd.hasOption(OPT_CONTAINER_NAME)) { displayHelp(); - if (!cmd.hasOption(HELP_OP)) { + if (!cmd.hasOption(SCMCLI.HELP_OP)) { throw new IOException("Expecting container name"); } else { return; @@ -68,7 +66,7 @@ public class CloseContainerHandler extends OzoneCommandHandler { Options options = new Options(); addOptions(options); HelpFormatter helpFormatter = new HelpFormatter(); - helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -close <option>", + helpFormatter.printHelp(SCMCLI.CMD_WIDTH, "hdfs scm -container -close <option>", "where <option> is", options, ""); } diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/ContainerCommandHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java index 3474213763..980388f28c 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/ContainerCommandHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java @@ -15,29 +15,30 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.cli.container; +package org.apache.hadoop.hdds.scm.cli.container; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; -import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.client.ScmClient; import java.io.IOException; import java.util.Arrays; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP; -import static org.apache.hadoop.ozone.scm.cli.container.CloseContainerHandler.CONTAINER_CLOSE; -import static org.apache.hadoop.ozone.scm.cli.container - .CreateContainerHandler.CONTAINER_CREATE; -import static org.apache.hadoop.ozone.scm.cli.container - .DeleteContainerHandler.CONTAINER_DELETE; -import static org.apache.hadoop.ozone.scm.cli.container - .InfoContainerHandler.CONTAINER_INFO; -import static org.apache.hadoop.ozone.scm.cli.container - .ListContainerHandler.CONTAINER_LIST; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP; +import static org.apache.hadoop.hdds.scm.cli.container.CloseContainerHandler + .CONTAINER_CLOSE; +import static org.apache.hadoop.hdds.scm.cli.container.CreateContainerHandler + .CONTAINER_CREATE; +import static org.apache.hadoop.hdds.scm.cli.container.DeleteContainerHandler + .CONTAINER_DELETE; +import static org.apache.hadoop.hdds.scm.cli.container.InfoContainerHandler + .CONTAINER_INFO; +import static org.apache.hadoop.hdds.scm.cli.container.ListContainerHandler + .CONTAINER_LIST; /** * The handler class of container-specific commands, e.g. addContainer. diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CreateContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java index 10f728c92b..2961831e67 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/CreateContainerHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java @@ -15,19 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli.container; +package org.apache.hadoop.hdds.scm.cli.container; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; -import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler; -import org.apache.hadoop.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.client.ScmClient; import java.io.IOException; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP; /** * This is the handler that process container creation command. 
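(Aside: the SCMCLI javadoc a few hunks above describes how CLI options are registered in a cascading manner, with SCMCLI asking ContainerCommandHandler to add its options, and that handler asking its sub-commands, such as CreateContainerHandler, to add their own, so that BasicParser sees every option before parsing begins. Below is a minimal, self-contained sketch of that pattern using Apache Commons CLI; the class and option names (CascadingOptionsSketch, ContainerHandlerSketch, CreateHandlerSketch) are illustrative stand-ins for this aside only, not the renamed Hadoop classes.

import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class CascadingOptionsSketch {

  // Hypothetical sub-command handler: contributes only its own option.
  static class CreateHandlerSketch {
    static void addOptions(Options options) {
      options.addOption(new Option("create", false, "create a new container"));
    }
  }

  // Hypothetical container-level handler: adds its own flag, then cascades
  // the request one level down to each sub-command handler.
  static class ContainerHandlerSketch {
    static void addOptions(Options options) {
      options.addOption(new Option("container", false, "container related options"));
      CreateHandlerSketch.addOptions(options);
    }
  }

  // Top-level CLI: collects every option before handing Options to the parser.
  static Options buildOptions() {
    Options options = new Options();
    options.addOption(new Option("help", false, "display help"));
    ContainerHandlerSketch.addOptions(options);
    return options;
  }

  public static void main(String[] args) {
    // Options from every level are now visible to a single parser pass.
    System.out.println(buildOptions().getOptions());
  }
}

Collecting all options up front in this way mirrors the reasoning in the javadoc: a single commons-cli parser pass has to recognize flags belonging to any sub-command, so each level must contribute its options before parsing starts.)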
diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/DeleteContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java index 0782af3b04..a5b625a5e6 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/DeleteContainerHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java @@ -16,21 +16,21 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli.container; +package org.apache.hadoop.hdds.scm.cli.container; import com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; -import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import java.io.IOException; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP; /** * This is the handler that process delete container command. diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java index e62fe6b26e..c609915117 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java @@ -15,26 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.cli.container; +package org.apache.hadoop.hdds.scm.cli.container; import com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; - -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import java.io.IOException; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP; /** * This is the handler that process container info command. @@ -77,7 +76,7 @@ public class InfoContainerHandler extends OzoneCommandHandler { logOut("Container Name: %s", containerData.getName()); String openStatus = - containerData.getState() == HdslProtos.LifeCycleState.OPEN ? "OPEN" : + containerData.getState() == HddsProtos.LifeCycleState.OPEN ? "OPEN" : "CLOSED"; logOut("Container State: %s", openStatus); if (!containerData.getHash().isEmpty()) { diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/ListContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java index d0ac974742..0c7e79037e 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/ListContainerHandler.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListContainerHandler.java @@ -15,23 +15,23 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.scm.cli.container; +package org.apache.hadoop.hdds.scm.cli.container; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; -import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.ozone.web.utils.JsonUtils; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; import java.io.IOException; import java.util.List; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.CMD_WIDTH; -import static org.apache.hadoop.ozone.scm.cli.SCMCLI.HELP_OP; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH; +import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP; /** * This is the handler that process container list command. diff --git a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java index 1c9f40d8c2..0630df2d15 100644 --- a/hadoop-hdsl/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/container/package-info.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/package-info.java @@ -16,4 +16,4 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli.container;
\ No newline at end of file +package org.apache.hadoop.hdds.scm.cli.container;
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/package-info.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java index 274f85955b..4762d550fb 100644 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/protocol/package-info.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/package-info.java @@ -16,4 +16,4 @@ * limitations under the License. */ -package org.apache.hadoop.scm.protocol; +package org.apache.hadoop.hdds.scm.cli;
\ No newline at end of file diff --git a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-hdsl/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index 754ce849fa..0000000000 --- a/hadoop-hdsl/client/src/main/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * Generic helper classes for the client side of hdsl workflows.. - */
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/package-info.java deleted file mode 100644 index 749283874a..0000000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/ozone/client/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client; - -/** - * Base property types for HDSL containers and replications. - */
\ No newline at end of file diff --git a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/ContainerStates/package-info.java b/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/ContainerStates/package-info.java deleted file mode 100644 index 61f5609817..0000000000 --- a/hadoop-hdsl/common/src/main/java/org/apache/hadoop/scm/container/ContainerStates/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * Container States. - */ -package org.apache.hadoop.ozone.scm.container.ContainerStates;
\ No newline at end of file diff --git a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java b/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java deleted file mode 100644 index e4c1989b46..0000000000 --- a/hadoop-hdsl/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.common.impl; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.interfaces - .ContainerLocationManager; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.List; - -/** - * A class that tells the ContainerManager where to place the containers. - * Please note : There is *no* one-to-one correlation between metadata - * metadataLocations and data metadataLocations. - * - * For example : A user could map all container files to a - * SSD but leave data/metadata on bunch of other disks. - */ -public class ContainerLocationManagerImpl implements ContainerLocationManager { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerLocationManagerImpl.class); - - private final List<StorageLocation> dataLocations; - private int currentIndex; - private final List<StorageLocation> metadataLocations; - - /** - * Constructs a Location Manager. - * @param metadataLocations - Refers to the metadataLocations - * where we store the container metadata. - * @param dataDirs - metadataLocations where we store the actual - * data or chunk files. - * @throws IOException - */ - public ContainerLocationManagerImpl(List<StorageLocation> metadataLocations, - List<StorageLocation> dataDirs) - throws IOException { - dataLocations = dataDirs; - this.metadataLocations = metadataLocations; - } - /** - * Returns the path where the container should be placed from a set of - * metadataLocations. - * - * @return A path where we should place this container and metadata. - * @throws IOException - */ - @Override - public Path getContainerPath() - throws IOException { - Preconditions.checkState(metadataLocations.size() > 0); - int index = currentIndex % metadataLocations.size(); - Path path = metadataLocations.get(index).getFile().toPath(); - return path.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX); - } - - /** - * Returns the path where the container Data file are stored. 
- * - * @return a path where we place the LevelDB and data files of a container. - * @throws IOException - */ - @Override - public Path getDataPath(String containerName) throws IOException { - Path currentPath = dataLocations.get(currentIndex++ % dataLocations.size()) - .getFile().toPath(); - currentPath = currentPath.resolve(OzoneConsts.CONTAINER_PREFIX); - return currentPath.resolve(containerName); - } -} diff --git a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/standalone/package-info.java b/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/standalone/package-info.java deleted file mode 100644 index 7e6393abc7..0000000000 --- a/hadoop-hdsl/server-scm/src/main/java/org/apache/hadoop/ozone/scm/pipelines/standalone/package-info.java +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.scm.pipelines.standalone;
\ No newline at end of file diff --git a/hadoop-ozone/acceptance-test/README.md b/hadoop-ozone/acceptance-test/README.md index 5e642c87a8..8907c840e4 100644 --- a/hadoop-ozone/acceptance-test/README.md +++ b/hadoop-ozone/acceptance-test/README.md @@ -14,7 +14,7 @@ # Acceptance test suite for Ozone/Hdsl -This project contains acceptance tests for ozone/hdsl using docker-compose and [robot framework](http://robotframework.org/). +This project contains acceptance tests for ozone/hdds using docker-compose and [robot framework](http://robotframework.org/). ## Run diff --git a/hadoop-ozone/acceptance-test/src/test/compose/docker-config b/hadoop-ozone/acceptance-test/src/test/compose/docker-config index 20c1e30d68..8e5efa961f 100644 --- a/hadoop-ozone/acceptance-test/src/test/compose/docker-config +++ b/hadoop-ozone/acceptance-test/src/test/compose/docker-config @@ -27,7 +27,7 @@ HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 -HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HdslServerPlugin,org.apache.hadoop.ozone.web.ObjectStoreRestPlugin +HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 1ed41f5c8f..b9152130b3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client; import com.google.common.base.Preconditions; import com.google.common.base.Strings; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.security.UserGroupInformation; @@ -53,7 +54,7 @@ public class ObjectStore { */ public ObjectStore(Configuration conf, ClientProtocol proxy) { this.proxy = proxy; - this.listCacheSize = OzoneClientUtils.getListCacheSize(conf); + this.listCacheSize = HddsClientUtils.getListCacheSize(conf); } /** @@ -63,7 +64,7 @@ public class ObjectStore { */ public void createVolume(String volumeName) throws IOException { Preconditions.checkNotNull(volumeName); - OzoneClientUtils.verifyResourceName(volumeName); + HddsClientUtils.verifyResourceName(volumeName); proxy.createVolume(volumeName); } @@ -77,7 +78,7 @@ public class ObjectStore { throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(volumeArgs); - OzoneClientUtils.verifyResourceName(volumeName); + HddsClientUtils.verifyResourceName(volumeName); proxy.createVolume(volumeName, volumeArgs); } @@ -89,7 +90,7 @@ public class ObjectStore { */ public OzoneVolume getVolume(String volumeName) throws IOException { Preconditions.checkNotNull(volumeName); - OzoneClientUtils.verifyResourceName(volumeName); + HddsClientUtils.verifyResourceName(volumeName); OzoneVolume volume = proxy.getVolumeDetails(volumeName); return volume; } @@ -133,7 +134,7 @@ public class ObjectStore { */ public void 
deleteVolume(String volumeName) throws IOException { Preconditions.checkNotNull(volumeName); - OzoneClientUtils.verifyResourceName(volumeName); + HddsClientUtils.verifyResourceName(volumeName); proxy.deleteVolume(volumeName); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index b94e0f7122..ba6286ba96 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -22,6 +22,9 @@ package org.apache.hadoop.ozone.client; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -107,7 +110,7 @@ public class OzoneBucket { this.acls = acls; this.storageType = storageType; this.versioning = versioning; - this.listCacheSize = OzoneClientUtils.getListCacheSize(conf); + this.listCacheSize = HddsClientUtils.getListCacheSize(conf); this.creationTime = creationTime; this.defaultReplication = ReplicationFactor.valueOf(conf.getInt( OzoneConfigKeys.OZONE_REPLICATION, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index a235b4addf..6b24f2a915 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.rest.RestClient; import org.apache.hadoop.ozone.client.rpc.RpcClient; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 31cbb40fcc..c4e7331ac5 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -20,6 +20,8 @@ package org.apache.hadoop.ozone.client; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.client.OzoneQuota; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -88,7 +90,7 @@ public class OzoneVolume { this.quotaInBytes = quotaInBytes; this.creationTime = creationTime; this.acls = acls; - this.listCacheSize = OzoneClientUtils.getListCacheSize(conf); + this.listCacheSize = HddsClientUtils.getListCacheSize(conf); } /** @@ -162,7 +164,7 @@ public class OzoneVolume { * @param quota new quota * @throws IOException */ - public void setQuota(OzoneQuota quota) throws 
IOException { + public void setQuota(OzoneQuota quota) throws IOException { Preconditions.checkNotNull(proxy, "Client proxy is not set."); Preconditions.checkNotNull(quota); proxy.setVolumeQuota(name, quota); @@ -178,7 +180,7 @@ public class OzoneVolume { throws IOException { Preconditions.checkNotNull(proxy, "Client proxy is not set."); Preconditions.checkNotNull(bucketName); - OzoneClientUtils.verifyResourceName(bucketName); + HddsClientUtils.verifyResourceName(bucketName); proxy.createBucket(name, bucketName); } @@ -193,7 +195,7 @@ public class OzoneVolume { Preconditions.checkNotNull(proxy, "Client proxy is not set."); Preconditions.checkNotNull(bucketName); Preconditions.checkNotNull(bucketArgs); - OzoneClientUtils.verifyResourceName(bucketName); + HddsClientUtils.verifyResourceName(bucketName); proxy.createBucket(name, bucketName, bucketArgs); } @@ -206,7 +208,7 @@ public class OzoneVolume { public OzoneBucket getBucket(String bucketName) throws IOException { Preconditions.checkNotNull(proxy, "Client proxy is not set."); Preconditions.checkNotNull(bucketName); - OzoneClientUtils.verifyResourceName(bucketName); + HddsClientUtils.verifyResourceName(bucketName); OzoneBucket bucket = proxy.getBucketDetails(name, bucketName); return bucket; } @@ -231,7 +233,7 @@ public class OzoneVolume { public void deleteBucket(String bucketName) throws IOException { Preconditions.checkNotNull(proxy, "Client proxy is not set."); Preconditions.checkNotNull(bucketName); - OzoneClientUtils.verifyResourceName(bucketName); + HddsClientUtils.verifyResourceName(bucketName); proxy.deleteBucket(name, bucketName); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java index b65d1aa850..b82ed2576b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java @@ -20,15 +20,15 @@ package org.apache.hadoop.ozone.client.io; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.Seekable; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ChunkInputStream; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.ratis.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java index 26cc48a6b0..a8d3362e6c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java @@ -20,25 +20,25 @@ package org.apache.hadoop.ozone.client.io; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.Result; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Result; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; import org.apache.hadoop.ozone.ksm.protocolPB.KeySpaceManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ChunkOutputStream; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.storage.ChunkOutputStream; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java index d22d2dd4fa..bf9e80f4ab 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneContainerTranslation.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.client.io; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.KeyData; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData; /** * This class contains methods that define the translation between the Ozone diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java index 3857bd0336..c2ff979a2d 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.client.io; -import org.apache.hadoop.scm.storage.ChunkInputStream; +import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; import java.io.IOException; import java.io.InputStream; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 64a970e50e..816c185df5 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -23,10 +23,10 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java index f52f826a66..b8b4610b8a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java @@ -23,19 +23,19 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -190,7 +190,7 @@ public class RestClient implements ClientProtocol { serviceListJson, serviceInfoReference); List<ServiceInfo> dataNodeInfos = services.stream().filter( - a -> 
a.getNodeType().equals(HdslProtos.NodeType.DATANODE)) + a -> a.getNodeType().equals(HddsProtos.NodeType.DATANODE)) .collect(Collectors.toList()); ServiceInfo restServer = selector.getRestServer(dataNodeInfos); @@ -308,7 +308,7 @@ public class RestClient implements ClientProtocol { volInfo.getCreatedBy(), volInfo.getOwner().getName(), volInfo.getQuota().sizeInBytes(), - OzoneClientUtils.formatDateTime(volInfo.getCreatedOn()), + HddsClientUtils.formatDateTime(volInfo.getCreatedOn()), null); EntityUtils.consume(response); return volume; @@ -540,7 +540,7 @@ public class RestClient implements ClientProtocol { bucketInfo.getAcls(), bucketInfo.getStorageType(), getBucketVersioningFlag(bucketInfo.getVersioning()), - OzoneClientUtils.formatDateTime(bucketInfo.getCreatedOn())); + HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn())); EntityUtils.consume(response); return bucket; } catch (URISyntaxException | ParseException e) { @@ -705,8 +705,8 @@ public class RestClient implements ClientProtocol { bucketName, keyInfo.getKeyName(), keyInfo.getSize(), - OzoneClientUtils.formatDateTime(keyInfo.getCreatedOn()), - OzoneClientUtils.formatDateTime(keyInfo.getModifiedOn())); + HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()), + HddsClientUtils.formatDateTime(keyInfo.getModifiedOn())); EntityUtils.consume(response); return key; } catch (URISyntaxException | ParseException e) { @@ -724,7 +724,7 @@ public class RestClient implements ClientProtocol { Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " + ugi.getUserName()); httpRequest.addHeader(HttpHeaders.DATE, - OzoneClientUtils.formatDateTime(Time.monotonicNow())); + HddsClientUtils.formatDateTime(Time.monotonicNow())); httpRequest.addHeader(Header.OZONE_VERSION_HEADER, Header.OZONE_V1_VERSION_HEADER); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 15373cb8d4..2464fe3d50 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.client.rpc; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.Client; @@ -31,10 +31,10 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; @@ -58,13 +58,13 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.ServicePort; -import 
org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolPB; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; @@ -156,7 +156,7 @@ public class RpcClient implements ClientProtocol { private InetSocketAddress getScmAddressForClient() throws IOException { List<ServiceInfo> services = keySpaceManagerClient.getServiceList(); ServiceInfo scmInfo = services.stream().filter( - a -> a.getNodeType().equals(HdslProtos.NodeType.SCM)) + a -> a.getNodeType().equals(HddsProtos.NodeType.SCM)) .collect(Collectors.toList()).get(0); return NetUtils.createSocketAddr(scmInfo.getHostname()+ ":" + scmInfo.getPort(ServicePort.Type.RPC)); @@ -460,8 +460,8 @@ public class RpcClient implements ClientProtocol { .setBucketName(bucketName) .setKeyName(keyName) .setDataSize(size) - .setType(HdslProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HdslProtos.ReplicationFactor.valueOf(factor.getValue())) + .setType(HddsProtos.ReplicationType.valueOf(type.toString())) + .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) .build(); OpenKeySession openKey = keySpaceManagerClient.openKey(keyArgs); @@ -473,8 +473,8 @@ public class RpcClient implements ClientProtocol { .setKsmClient(keySpaceManagerClient) .setChunkSize(chunkSize) .setRequestID(requestId) - .setType(HdslProtos.ReplicationType.valueOf(type.toString())) - .setFactor(HdslProtos.ReplicationFactor.valueOf(factor.getValue())) + .setType(HddsProtos.ReplicationType.valueOf(type.toString())) + .setFactor(HddsProtos.ReplicationFactor.valueOf(factor.getValue())) .build(); groupOutputStream.addPreallocateBlocks( openKey.getKeyInfo().getLatestVersionLocations(), diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java index 1d5edd2d28..e6fe0ecfcd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java @@ -20,8 +20,9 @@ package org.apache.hadoop.ozone.web.client; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.client.OzoneClientUtils; + import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.headers.Header; @@ -30,7 +31,7 @@ import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.KeyInfo; import org.apache.hadoop.ozone.web.response.ListKeys; -import static org.apache.hadoop.hdsl.server.ServerUtils.releaseConnection; +import static org.apache.hadoop.hdds.server.ServerUtils.releaseConnection; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import 
org.apache.http.client.methods.HttpDelete; @@ -177,7 +178,7 @@ public class OzoneBucket { HttpPut putRequest = null; InputStream is = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName() + "/" + keyName).build(); @@ -236,7 +237,7 @@ public class OzoneBucket { HttpPut putRequest = null; FileInputStream fis = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName() + "/" + keyName).build(); @@ -314,7 +315,7 @@ public class OzoneBucket { FileOutputStream outPutFile = null; HttpGet getRequest = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { outPutFile = new FileOutputStream(downloadTo.toFile()); URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); @@ -347,7 +348,7 @@ public class OzoneBucket { HttpGet getRequest = null; ByteArrayOutputStream outPutStream = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { outPutStream = new ByteArrayOutputStream(); URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); @@ -417,7 +418,7 @@ public class OzoneBucket { } HttpDelete deleteRequest = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName() + "/" + keyName).build(); @@ -480,7 +481,7 @@ public class OzoneBucket { public List<OzoneKey> listKeys(String resultLength, String previousKey, String prefix) throws OzoneException { HttpGet getRequest = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { OzoneRestClient client = getVolume().getClient(); URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); builder.setPath("/" + getVolume().getVolumeName() + "/" + getBucketName()) @@ -590,7 +591,7 @@ public class OzoneBucket { } HttpGet getRequest = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { OzoneRestClient client = getVolume().getClient(); URIBuilder builder = new URIBuilder(volume.getClient().getEndPointURI()); builder diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java index 8ee363c4f2..6d0bbf413f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java @@ -22,8 +22,8 @@ import com.google.common.base.Strings; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang.StringUtils; +import 
org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.client.OzoneClientUtils; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.headers.Header; import org.apache.hadoop.ozone.web.response.ListVolumes; @@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.web.response.VolumeInfo; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.util.Time; -import static org.apache.hadoop.hdsl.server.ServerUtils.releaseConnection; +import static org.apache.hadoop.hdds.server.ServerUtils.releaseConnection; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpDelete; @@ -604,7 +604,7 @@ public class OzoneRestClient implements Closeable { HttpPut putRequest = null; FileInputStream fis = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { URIBuilder builder = new URIBuilder(getEndPointURI()); builder.setPath("/" + volumeName + "/" + bucketName + "/" + keyName) .build(); @@ -650,7 +650,7 @@ public class OzoneRestClient implements Closeable { FileOutputStream outPutFile = null; HttpGet getRequest = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { outPutFile = new FileOutputStream(downloadTo.toFile()); URIBuilder builder = new URIBuilder(getEndPointURI()); @@ -687,7 +687,7 @@ public class OzoneRestClient implements Closeable { OzoneUtils.verifyResourceName(bucketName); HttpGet getRequest = null; - try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) { + try (CloseableHttpClient httpClient = HddsClientUtils.newHttpClient()) { URIBuilder builder = new URIBuilder(getEndPointURI()); builder.setPath("/" + volumeName + "/" + bucketName).build(); @@ -798,6 +798,6 @@ public class OzoneRestClient implements Closeable { @VisibleForTesting public CloseableHttpClient newHttpClient() { - return OzoneClientUtils.newHttpClient(); + return HddsClientUtils.newHttpClient(); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java index 14488a37c1..6728e680a9 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java @@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.web.client; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import org.apache.hadoop.fs.StorageType; -import org.apache.hadoop.ozone.client.OzoneClientUtils; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.headers.Header; import org.apache.hadoop.ozone.web.request.OzoneQuota; @@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.web.response.VolumeInfo; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import static org.apache.hadoop.hdsl.server.ServerUtils.releaseConnection; +import static org.apache.hadoop.hdds.server.ServerUtils.releaseConnection; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpDelete; @@ -578,6 
+578,6 @@ public class OzoneVolume { @VisibleForTesting public CloseableHttpClient newHttpClient() { - return OzoneClientUtils.newHttpClient(); + return HddsClientUtils.newHttpClient(); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java index ccd4097827..36d1d9aeec 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientUtils.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java @@ -19,9 +19,9 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -29,16 +29,16 @@ import org.junit.rules.Timeout; import java.net.InetSocketAddress; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; /** * This test class verifies the parsing of SCM endpoint config settings. - * The parsing logic is in {@link OzoneClientUtils}. + * The parsing logic is in {@link org.apache.hadoop.hdds.scm.client.HddsClientUtils}. */ -public class TestOzoneClientUtils { +public class TestHddsClientUtils { @Rule public Timeout timeout = new Timeout(300000); diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 40b905a6ba..7f51732242 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -62,7 +62,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ </param> <param> - ${basedir}/../../hadoop-hdsl/common/src/main/proto/ + ${basedir}/../../hadoop-hdds/common/src/main/proto/ </param> <param>${basedir}/src/main/proto</param> </imports> diff --git a/hadoop-ozone/common/src/main/bin/oz b/hadoop-ozone/common/src/main/bin/oz index 7e9c396525..7841e7aadf 100755 --- a/hadoop-ozone/common/src/main/bin/oz +++ b/hadoop-ozone/common/src/main/bin/oz @@ -122,7 +122,7 @@ function ozonecmd_case ;; scm) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" - HADOOP_CLASSNAME='org.apache.hadoop.ozone.scm.StorageContainerManager' + HADOOP_CLASSNAME='org.apache.hadoop.hdds.scm.StorageContainerManager' hadoop_debug "Appending HDFS_STORAGECONTAINERMANAGER_OPTS onto HADOOP_OPTS" HADOOP_OPTS="${HADOOP_OPTS} ${HDFS_STORAGECONTAINERMANAGER_OPTS}" ;; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java index 4ae36e618e..041a69bca0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/KsmUtils.java @@ -23,8 +23,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetUtils; import com.google.common.base.Optional; -import static org.apache.hadoop.hdsl.HdslUtils.getHostNameFromConfigKeys; -import static org.apache.hadoop.hdsl.HdslUtils.getPortNumberFromConfigKeys; +import static 
org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY; import static org.apache.hadoop.ozone.ksm.KSMConfigKeys .OZONE_KSM_BIND_HOST_DEFAULT; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java index 7a474ec057..f98b56a6d3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java @@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.client.rest.response; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneQuota; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java index 454147272c..e43cada46a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java @@ -30,8 +30,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdsl.HdslUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.KsmUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StringUtils; @@ -234,7 +234,7 @@ public class OzoneGetConf extends Configured implements Tool { @Override public int doWorkInternal(OzoneGetConf tool, String[] args) throws IOException { - Collection<InetSocketAddress> addresses = HdslUtils + Collection<InetSocketAddress> addresses = HddsUtils .getSCMAddresses(tool.getConf()); for (InetSocketAddress addr : addresses) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java index c4fc69421c..cd17e28b9a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmKeyArgs.java @@ -16,8 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.ozone.ksm.helpers; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; /** * Args for key. 
Client use this to specify key's attributes on key creation diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java index afc3aa8c5b..6b42c279a0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/KsmVolumeArgs.java @@ -22,7 +22,7 @@ import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import java.io.IOException; import java.util.HashMap; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java index 91bd61f0e4..e07232d388 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/ksm/helpers/ServiceInfo.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.client.rest.response.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos .ServicePort; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index bff947e88f..22fff56d7d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -29,7 +29,7 @@ import java.util.UUID; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.HdslUtils; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.ozone.OzoneConsts; import com.google.common.base.Preconditions; @@ -120,7 +120,7 @@ public final class OzoneUtils { * @return the path of datanode id as string */ public static String getDatanodeIdFilePath(Configuration conf) { - return HdslUtils.getDatanodeIdFilePath(conf); + return HddsUtils.getDatanodeIdFilePath(conf); } /** @@ -141,7 +141,7 @@ public final class OzoneUtils { } public static boolean isOzoneEnabled(Configuration conf) { - return HdslUtils.isHdslEnabled(conf); + return HddsUtils.isHddsEnabled(conf); } diff --git a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto b/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto index 260936d551..a6026f1f99 100644 --- a/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto +++ b/hadoop-ozone/common/src/main/proto/KeySpaceManagerProtocol.proto @@ -35,7 +35,7 @@ This is similar to Namenode for Ozone. 
*/ import "hdfs.proto"; -import "hdsl.proto"; +import "hdds.proto"; enum Status { OK = 1; @@ -60,7 +60,7 @@ message VolumeInfo { required string ownerName = 2; required string volume = 3; optional uint64 quotaInBytes = 4; - repeated hadoop.hdsl.KeyValue metadata = 5; + repeated hadoop.hdds.KeyValue metadata = 5; repeated OzoneAclInfo volumeAcls = 6; required uint64 creationTime = 7; } @@ -225,8 +225,8 @@ message KeyArgs { required string bucketName = 2; required string keyName = 3; optional uint64 dataSize = 4; - optional hadoop.hdsl.ReplicationType type = 5; - optional hadoop.hdsl.ReplicationFactor factor = 6; + optional hadoop.hdds.ReplicationType type = 5; + optional hadoop.hdds.ReplicationFactor factor = 6; } message KeyLocation { @@ -338,7 +338,7 @@ message ServicePort { } message ServiceInfo { - required hadoop.hdsl.NodeType nodeType = 1; + required hadoop.hdds.NodeType nodeType = 1; required string hostname = 2; repeated ServicePort servicePorts = 3; } diff --git a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh index b50c631365..0b7a5a71c6 100644 --- a/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh +++ b/hadoop-ozone/common/src/main/shellprofile.d/hadoop-ozone.sh @@ -19,7 +19,7 @@ if [[ "${HADOOP_SHELL_EXECNAME}" = oz ]]; then fi -## @description Profile for hdsl/cblock/ozone components. +## @description Profile for hdds/cblock/ozone components. ## @audience private ## @stability evolving function _ozone_hadoop_classpath @@ -28,16 +28,16 @@ function _ozone_hadoop_classpath # get all of the ozone jars+config in the path # - if [[ -d "${HADOOP_HDFS_HOME}/${HDSL_DIR}/webapps" ]]; then - hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDSL_DIR}" + if [[ -d "${HADOOP_HDFS_HOME}/${HDDS_DIR}/webapps" ]]; then + hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDDS_DIR}" fi - if [[ -d "${HADOOP_HDFS_HOME}/${HDSL_DIR}/webapps" ]]; then + if [[ -d "${HADOOP_HDFS_HOME}/${HDDS_DIR}/webapps" ]]; then hadoop_add_classpath "${HADOOP_HDFS_HOME}/${OZONE_DIR}" fi - hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDSL_LIB_JARS_DIR}"'/*' - hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDSL_DIR}"'/*' + hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDDS_LIB_JARS_DIR}"'/*' + hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDDS_DIR}"'/*' hadoop_add_classpath "${HADOOP_HDFS_HOME}/${OZONE_LIB_JARS_DIR}"'/*' hadoop_add_classpath "${HADOOP_HDFS_HOME}/${OZONE_DIR}"'/*' hadoop_add_classpath "${HADOOP_HDFS_HOME}/${CBLOCK_LIB_JARS_DIR}"'/*' diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java index c1e5a245c6..3ed80b3f50 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/container/TestContainerStateManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java @@ -14,18 +14,21 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.scm.container; +package org.apache.hadoop.hdds.scm.container; import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.ContainerStateManager; +import org.apache.hadoop.hdds.scm.container.Mapping; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.StorageContainerManager; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerID; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.StorageContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -78,7 +81,7 @@ public class TestContainerStateManager { ContainerInfo info = stateManager .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.ALLOCATED); + HddsProtos.LifeCycleState.ALLOCATED); Assert.assertEquals(container1, info.getContainerName()); Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes()); Assert.assertEquals(containerOwner, info.getOwner()); @@ -86,7 +89,7 @@ public class TestContainerStateManager { info.getPipeline().getType()); Assert.assertEquals(xceiverClientManager.getFactor(), info.getPipeline().getFactor()); - Assert.assertEquals(HdslProtos.LifeCycleState.ALLOCATED, info.getState()); + Assert.assertEquals(HddsProtos.LifeCycleState.ALLOCATED, info.getState()); // Check there are two containers in ALLOCATED state after allocation String container2 = "container" + RandomStringUtils.randomNumeric(5); @@ -95,7 +98,7 @@ public class TestContainerStateManager { int numContainers = stateManager .getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.ALLOCATED).size(); + HddsProtos.LifeCycleState.ALLOCATED).size(); Assert.assertEquals(2, numContainers); } @@ -108,7 +111,7 @@ public class TestContainerStateManager { xceiverClientManager.getFactor(), cname + i, containerOwner); if (i >= 5) { scm.getScmContainerManager() - .updateContainerState(cname + i, HdslProtos.LifeCycleEvent.CREATE); + .updateContainerState(cname + i, HddsProtos.LifeCycleEvent.CREATE); } } @@ -120,11 +123,11 @@ public class TestContainerStateManager { int containers = stateManager .getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.ALLOCATED).size(); + HddsProtos.LifeCycleState.ALLOCATED).size(); Assert.assertEquals(5, containers); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CREATING).size(); + HddsProtos.LifeCycleState.CREATING).size(); Assert.assertEquals(5, containers); } @@ -134,9 +137,9 @@ public class TestContainerStateManager { 
scm.allocateContainer(xceiverClientManager.getType(), xceiverClientManager.getFactor(), container1, containerOwner); scmContainerMapping.updateContainerState(container1, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); scmContainerMapping.updateContainerState(container1, - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); String container2 = "container-56789"; scm.allocateContainer(xceiverClientManager.getType(), @@ -145,26 +148,26 @@ public class TestContainerStateManager { ContainerInfo info = stateManager .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.OPEN); + HddsProtos.LifeCycleState.OPEN); Assert.assertEquals(container1, info.getContainerName()); info = stateManager .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.ALLOCATED); + HddsProtos.LifeCycleState.ALLOCATED); Assert.assertEquals(container2, info.getContainerName()); scmContainerMapping.updateContainerState(container2, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); scmContainerMapping.updateContainerState(container2, - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); // space has already been allocated in container1, now container 2 should // be chosen. info = stateManager .getMatchingContainer(OzoneConsts.GB * 3, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.OPEN); + HddsProtos.LifeCycleState.OPEN); Assert.assertEquals(container2, info.getContainerName()); } @@ -173,7 +176,7 @@ public class TestContainerStateManager { NavigableSet<ContainerID> containerList = stateManager .getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.ALLOCATED); + HddsProtos.LifeCycleState.ALLOCATED); int containers = containerList == null ? 
0 : containerList.size(); Assert.assertEquals(0, containers); @@ -184,49 +187,49 @@ public class TestContainerStateManager { xceiverClientManager.getFactor(), container1, containerOwner); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.ALLOCATED).size(); + HddsProtos.LifeCycleState.ALLOCATED).size(); Assert.assertEquals(1, containers); scmContainerMapping.updateContainerState(container1, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CREATING).size(); + HddsProtos.LifeCycleState.CREATING).size(); Assert.assertEquals(1, containers); scmContainerMapping.updateContainerState(container1, - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.OPEN).size(); + HddsProtos.LifeCycleState.OPEN).size(); Assert.assertEquals(1, containers); scmContainerMapping - .updateContainerState(container1, HdslProtos.LifeCycleEvent.FINALIZE); + .updateContainerState(container1, HddsProtos.LifeCycleEvent.FINALIZE); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CLOSING).size(); + HddsProtos.LifeCycleState.CLOSING).size(); Assert.assertEquals(1, containers); scmContainerMapping - .updateContainerState(container1, HdslProtos.LifeCycleEvent.CLOSE); + .updateContainerState(container1, HddsProtos.LifeCycleEvent.CLOSE); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CLOSED).size(); + HddsProtos.LifeCycleState.CLOSED).size(); Assert.assertEquals(1, containers); scmContainerMapping - .updateContainerState(container1, HdslProtos.LifeCycleEvent.DELETE); + .updateContainerState(container1, HddsProtos.LifeCycleEvent.DELETE); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.DELETING).size(); + HddsProtos.LifeCycleState.DELETING).size(); Assert.assertEquals(1, containers); scmContainerMapping - .updateContainerState(container1, HdslProtos.LifeCycleEvent.CLEANUP); + .updateContainerState(container1, HddsProtos.LifeCycleEvent.CLEANUP); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.DELETED).size(); + HddsProtos.LifeCycleState.DELETED).size(); Assert.assertEquals(1, containers); // Allocate container1 and update its state from ALLOCATED -> CREATING -> @@ -235,12 +238,12 @@ public class TestContainerStateManager { scm.allocateContainer(xceiverClientManager.getType(), xceiverClientManager.getFactor(), container2, containerOwner); scmContainerMapping.updateContainerState(container2, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); scmContainerMapping - .updateContainerState(container2, HdslProtos.LifeCycleEvent.TIMEOUT); + .updateContainerState(container2, HddsProtos.LifeCycleEvent.TIMEOUT); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), 
xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.DELETING).size(); + HddsProtos.LifeCycleState.DELETING).size(); Assert.assertEquals(1, containers); // Allocate container1 and update its state from ALLOCATED -> CREATING -> @@ -249,16 +252,16 @@ public class TestContainerStateManager { scm.allocateContainer(xceiverClientManager.getType(), xceiverClientManager.getFactor(), container3, containerOwner); scmContainerMapping.updateContainerState(container3, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); scmContainerMapping.updateContainerState(container3, - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); scmContainerMapping.updateContainerState(container3, - HdslProtos.LifeCycleEvent.FINALIZE); + HddsProtos.LifeCycleEvent.FINALIZE); scmContainerMapping - .updateContainerState(container3, HdslProtos.LifeCycleEvent.CLOSE); + .updateContainerState(container3, HddsProtos.LifeCycleEvent.CLOSE); containers = stateManager.getMatchingContainerIDs(containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.CLOSED).size(); + HddsProtos.LifeCycleState.CLOSED).size(); Assert.assertEquals(1, containers); } @@ -268,9 +271,9 @@ public class TestContainerStateManager { scm.allocateContainer(xceiverClientManager.getType(), xceiverClientManager.getFactor(), container1, containerOwner); scmContainerMapping.updateContainerState(container1, - HdslProtos.LifeCycleEvent.CREATE); + HddsProtos.LifeCycleEvent.CREATE); scmContainerMapping.updateContainerState(container1, - HdslProtos.LifeCycleEvent.CREATED); + HddsProtos.LifeCycleEvent.CREATED); Random ran = new Random(); long allocatedSize = 0; @@ -281,7 +284,7 @@ public class TestContainerStateManager { ContainerInfo info = stateManager .getMatchingContainer(size, containerOwner, xceiverClientManager.getType(), xceiverClientManager.getFactor(), - HdslProtos.LifeCycleState.OPEN); + HddsProtos.LifeCycleState.OPEN); Assert.assertEquals(container1, info.getContainerName()); ContainerMapping containerMapping = @@ -294,8 +297,8 @@ public class TestContainerStateManager { // the persisted value should always be equal to allocated size. 
byte[] containerBytes = containerMapping.getContainerStore().get(container1.getBytes(utf8)); - HdslProtos.SCMContainerInfo infoProto = - HdslProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); + HddsProtos.SCMContainerInfo infoProto = + HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes); ContainerInfo currentInfo = ContainerInfo.fromProtobuf(infoProto); Assert.assertEquals(allocatedSize, currentInfo.getAllocatedBytes()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java index 403241ffc3..9335c7c9b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java @@ -24,9 +24,9 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; @@ -36,14 +36,14 @@ import org.apache.hadoop.ozone.container.common import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; import org.apache.hadoop.ozone.ksm.KeySpaceManager; -import org.apache.hadoop.ozone.scm.SCMStorage; +import org.apache.hadoop.hdds.scm.SCMStorage; import org.apache.hadoop.ozone.ksm.KSMStorage; import org.apache.hadoop.ozone.web.client.OzoneRestClient; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; -import org.apache.hadoop.ozone.scm.StorageContainerManager; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; @@ -75,7 +75,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys import static org.apache.hadoop.ozone.OzoneConfigKeys .DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState .HEALTHY; import static org.junit.Assert.assertFalse; @@ -341,8 +341,8 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster public static DatanodeDetails getDatanodeDetails(DataNode dataNode) { DatanodeDetails datanodeDetails = null; for (ServicePlugin plugin : dataNode.getPlugins()) { - if (plugin instanceof HdslDatanodeService) { - datanodeDetails = ((HdslDatanodeService) plugin).getDatanodeDetails(); + if (plugin instanceof HddsDatanodeService) { + datanodeDetails = ((HddsDatanodeService) plugin).getDatanodeDetails(); } } return datanodeDetails; @@ -499,10 +499,10 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster 
conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); conf.set(KSMConfigKeys.OZONE_KSM_ADDRESS_KEY, "127.0.0.1:0"); conf.set(KSMConfigKeys.OZONE_KSM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.HDSL_REST_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "127.0.0.1:0"); conf.set(DFS_DATANODE_PLUGINS_KEY, "org.apache.hadoop.ozone.web.ObjectStoreRestPlugin," + - "org.apache.hadoop.ozone.HdslDatanodeService"); + "org.apache.hadoop.ozone.HddsDatanodeService"); // Configure KSM and SCM handlers conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numOfScmHandlers); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 70444fbb34..46d59deab0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.ozone; +import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.ksm.KeySpaceManager; -import org.apache.hadoop.ozone.scm.StorageContainerManager; import org.apache.hadoop.ozone.web.client.OzoneRestClient; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; import java.io.Closeable; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java index d6fb579c94..2ab427c24b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java @@ -33,26 +33,26 @@ public class MiniOzoneTestHelper { } public static OzoneContainer getOzoneContainer(DataNode dataNode) { - return findHdslPlugin(dataNode).getDatanodeStateMachine() + return findHddsPlugin(dataNode).getDatanodeStateMachine() .getContainer(); } public static ContainerManager getOzoneContainerManager(DataNode dataNode) { - return findHdslPlugin(dataNode).getDatanodeStateMachine() + return findHddsPlugin(dataNode).getDatanodeStateMachine() .getContainer().getContainerManager(); } public static DatanodeStateMachine getStateMachine(DataNode dataNode) { - return findHdslPlugin(dataNode).getDatanodeStateMachine(); + return findHddsPlugin(dataNode).getDatanodeStateMachine(); } - private static HdslDatanodeService findHdslPlugin(DataNode dataNode) { + private static HddsDatanodeService findHddsPlugin(DataNode dataNode) { for (ServicePlugin plugin : dataNode.getPlugins()) { - if (plugin instanceof HdslDatanodeService) { - return (HdslDatanodeService) plugin; + if (plugin instanceof HddsDatanodeService) { + return (HddsDatanodeService) plugin; } } - throw new IllegalStateException("Can't find the Hdsl server plugin in the" + throw new IllegalStateException("Can't find the Hdds server plugin in the" + " plugin collection of datanode"); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index eec097b54f..946dd327d9 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.web.client.OzoneRestClient; import org.apache.hadoop.ozone.client.rest.OzoneException; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 2e94adadf0..af7d1b8f86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -17,19 +17,19 @@ */ package org.apache.hadoop.ozone; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.client.ContainerOperationClient; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -80,8 +80,8 @@ public class TestContainerOperations { */ @Test public void testCreate() throws Exception { - Pipeline pipeline0 = storageClient.createContainer(HdslProtos - .ReplicationType.STAND_ALONE, HdslProtos.ReplicationFactor + Pipeline pipeline0 = storageClient.createContainer(HddsProtos + .ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor .ONE, "container0", "OZONE"); assertEquals("container0", pipeline0.getContainerName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index ff697b57a7..06386466c5 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -19,19 +19,19 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.TestUtils; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.XceiverClient; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.TestGenericTestUtils; import org.apache.hadoop.util.ServicePlugin; @@ -97,15 +97,15 @@ public class TestMiniOzoneCluster { String containerName = OzoneUtils.getRequestID(); DatanodeDetails datanodeDetails = null; for (ServicePlugin plugin : dn.getPlugins()) { - if (plugin instanceof HdslDatanodeService) { - datanodeDetails = ((HdslDatanodeService) plugin).getDatanodeDetails(); + if (plugin instanceof HddsDatanodeService) { + datanodeDetails = ((HddsDatanodeService) plugin).getDatanodeDetails(); } } final PipelineChannel pipelineChannel = new PipelineChannel(datanodeDetails.getUuidString(), - HdslProtos.LifeCycleState.OPEN, - HdslProtos.ReplicationType.STAND_ALONE, - HdslProtos.ReplicationFactor.ONE, "test"); + HddsProtos.LifeCycleState.OPEN, + HddsProtos.ReplicationType.STAND_ALONE, + HddsProtos.ReplicationFactor.ONE, "test"); pipelineChannel.addMember(datanodeDetails); Pipeline pipeline = new Pipeline(containerName, pipelineChannel); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index a000d9f5a1..533a3b4a69 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.conf.TestConfigurationFieldsBase; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; /** * Tests if configuration constants documented in ozone-defaults.xml. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index b4c94bd3e0..3fa02e487a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -21,25 +21,26 @@ import static org.junit.Assert.fail; import java.io.IOException; import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.SCMStorage; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; -import org.apache.hadoop.ozone.scm.SCMStorage; -import org.apache.hadoop.ozone.scm.StorageContainerManager; -import org.apache.hadoop.ozone.scm.StorageContainerManager.StartupOption; -import org.apache.hadoop.ozone.scm.block.DeletedBlockLog; -import org.apache.hadoop.ozone.scm.block.SCMBlockDeletingService; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.ScmInfo; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType; +import org.apache.hadoop.hdds.scm.StorageContainerManager; +import org.apache.hadoop.hdds.scm.StorageContainerManager.StartupOption; +import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; +import org.apache.hadoop.hdds.scm.block.SCMBlockDeletingService; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.ScmInfo; import org.junit.Rule; import org.junit.Assert; import org.junit.Test; @@ -59,7 +60,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.io.IOUtils; import org.junit.rules.Timeout; @@ -132,7 +133,7 @@ public class 
TestStorageContainerManager { try { Pipeline pipeLine2 = mockScm.allocateContainer( xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, "container2", "OZONE"); + HddsProtos.ReplicationFactor.ONE, "container2", "OZONE"); if (expectPermissionDenied) { fail("Operation should fail, expecting an IOException here."); } else { @@ -145,7 +146,7 @@ public class TestStorageContainerManager { try { Pipeline pipeLine3 = mockScm.allocateContainer( xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, "container3", "OZONE"); + HddsProtos.ReplicationFactor.ONE, "container3", "OZONE"); if (expectPermissionDenied) { fail("Operation should fail, expecting an IOException here."); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index e80e473a6a..498a44efc2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -37,7 +37,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java index 74691ab524..cc72a795a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rest/TestOzoneRestClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client.rest; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneAcl; @@ -30,10 +30,10 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import 
org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index add55ee45b..59eb7cf618 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; @@ -31,10 +31,10 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKey; -import org.apache.hadoop.ozone.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -42,11 +42,11 @@ import org.apache.hadoop.ozone.ksm.KeySpaceManager; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.client.rest.OzoneException; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB. +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB. 
StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.util.Time; import org.junit.AfterClass; @@ -381,10 +381,10 @@ public class TestOzoneRpcClient { .setBucketName(bucketName) .setKeyName(keyName) .build(); - HdslProtos.ReplicationType replicationType = - HdslProtos.ReplicationType.valueOf(type.toString()); - HdslProtos.ReplicationFactor replicationFactor = - HdslProtos.ReplicationFactor.valueOf(factor.getValue()); + HddsProtos.ReplicationType replicationType = + HddsProtos.ReplicationType.valueOf(type.toString()); + HddsProtos.ReplicationFactor replicationFactor = + HddsProtos.ReplicationFactor.valueOf(factor.getValue()); KsmKeyInfo keyInfo = keySpaceManager.lookupKey(keyArgs); for (KsmKeyLocationInfo info: keyInfo.getLatestVersionLocations().getLocationList()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 3e5db29a64..7924862447 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -21,23 +21,23 @@ package org.apache.hadoop.ozone.container; import com.google.common.base.Preconditions; import com.google.protobuf.ByteString; import org.apache.commons.codec.binary.Hex; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandResponseProto; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.KeyValue; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.KeyData; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.slf4j.Logger; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 41b5a885df..0f8c4578ff 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -20,11 +20,12 @@ package org.apache.hadoop.ozone.container.common; import com.google.common.collect.Lists; import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; @@ -34,9 +35,8 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl; import org.apache.hadoop.ozone.container.common.impl.RandomContainerDeletionChoosingPolicy; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; import org.apache.hadoop.ozone.container.common.statemachine.background.BlockDeletingService; -import org.apache.hadoop.ozone.scm.TestUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; import org.apache.hadoop.utils.BackgroundService; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index cb61da37a7..893f2f69dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -28,17 +28,17 @@ import java.util.Map; import java.util.Random; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; -import org.apache.hadoop.ozone.scm.TestUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.utils.MetadataStore; import org.junit.After; diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index fb44270fae..fae4c491b0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -21,13 +21,13 @@ import org.apache.commons.codec.binary.Hex; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.scm.TestUtils; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; @@ -35,7 +35,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.utils.MetadataStore; import org.junit.After; import org.junit.AfterClass; @@ -74,7 +74,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk; import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData; import static org.apache.hadoop.ozone.container.ContainerTestHelper .setDataChecksum; -import static org.apache.hadoop.hdsl.protocol.proto.ContainerProtos +import static org.apache.hadoop.hdds.protocol.proto.ContainerProtos .Stage.COMBINED; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index e2ca7f2052..a8b8b45d6c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -17,25 +17,25 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import 
org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneTestHelper; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.junit.Test; @@ -74,8 +74,8 @@ public class TestCloseContainerHandler { //get the name of a valid container KsmKeyArgs keyArgs = new KsmKeyArgs.Builder().setVolumeName("test").setBucketName("test") - .setType(HdslProtos.ReplicationType.STAND_ALONE) - .setFactor(HdslProtos.ReplicationFactor.ONE).setDataSize(1024) + .setType(HddsProtos.ReplicationType.STAND_ALONE) + .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(1024) .setKeyName("test").build(); KsmKeyLocationInfo ksmKeyLocationInfo = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 82eb22233c..1565fbc1c3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -23,11 +23,11 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import static org.mockito.Mockito.mock; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.ContainerTestHelper; @@ -36,10 +36,10 @@ import 
org.apache.hadoop.ozone.container.common.impl.Dispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer; -import org.apache.hadoop.ozone.scm.TestUtils; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.XceiverClient; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 251ac3a0dc..5885898fed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -18,18 +18,18 @@ package org.apache.hadoop.ozone.container.ozoneimpl; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.scm.TestUtils; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.XceiverClient; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClient; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.junit.Rule; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java index 1edfbd3851..10beed4512 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java @@ -20,13 +20,13 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import 
org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.util.CheckedBiConsumer; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java index 5fc6e04535..5c35f38e1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java @@ -19,9 +19,9 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.container.ContainerTestHelper; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 5ae55c3193..4d60e9e0dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -19,12 +19,14 @@ package org.apache.hadoop.ozone.container.server; import io.netty.channel.embedded.EmbeddedChannel; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto; + +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.impl.Dispatcher; @@ -34,12 +36,11 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerHandler; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; -import org.apache.hadoop.ozone.scm.TestUtils; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.XceiverClient; -import org.apache.hadoop.scm.XceiverClientRatis; -import org.apache.hadoop.scm.XceiverClientSpi; -import 
org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClient; +import org.apache.hadoop.hdds.scm.XceiverClientRatis; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.test.GenericTestUtils; import org.apache.ratis.RatisHelper; import org.apache.ratis.client.RaftClient; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 860851b4b6..296a6164cf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.freon; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java index 07892b5fb0..44030105df 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreon.java @@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.freon; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.util.ToolRunner; import org.junit.AfterClass; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java index c6a45af3a7..920f5c82ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java @@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.ksm; import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneTestHelper; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -30,9 +32,9 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.ozone.scm.StorageContainerManager; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import 
org.apache.hadoop.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.StorageContainerManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; @@ -106,8 +108,8 @@ public class TestContainerReportWithKeys { .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setType(HdslProtos.ReplicationType.STAND_ALONE) - .setFactor(HdslProtos.ReplicationFactor.ONE).setDataSize(keySize) + .setType(HddsProtos.ReplicationType.STAND_ALONE) + .setFactor(HddsProtos.ReplicationFactor.ONE).setDataSize(keySize) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java index e6f5123f46..ae50c0379e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMMetrcis.java @@ -25,7 +25,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.junit.After; import org.junit.Before; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java index d5ebf27b51..1db4a3e9e7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKSMSQLCli.java @@ -19,9 +19,9 @@ package org.apache.hadoop.ozone.ksm; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.scm.cli.SQLCLI; +import org.apache.hadoop.hdds.scm.cli.SQLCLI; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java index 4de6827807..ce40948892 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java @@ -25,16 +25,16 @@ import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import 
org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.ksm.exceptions.KSMException; -import org.apache.hadoop.ozone.scm.SCMStorage; +import org.apache.hadoop.hdds.scm.SCMStorage; import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.ServicePort; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; import org.apache.hadoop.ozone.web.handlers.UserArgs; @@ -46,8 +46,8 @@ import org.apache.hadoop.ozone.web.response.BucketInfo; import org.apache.hadoop.ozone.web.response.KeyInfo; import org.apache.hadoop.ozone.web.response.VolumeInfo; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.Status; @@ -87,7 +87,8 @@ import java.util.stream.Stream; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConsts.DELETING_KEY_PREFIX; import static org.apache.hadoop.ozone.ksm.KSMConfigKeys.OZONE_KSM_ADDRESS_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .OZONE_SCM_CLIENT_ADDRESS_KEY; /** * Test Key Space Manager operation in distributed handler scenario. @@ -1220,7 +1221,7 @@ public class TestKeySpaceManager { ksmMetrics.getNumGetServiceLists()); ServiceInfo ksmInfo = services.stream().filter( - a -> a.getNodeType().equals(HdslProtos.NodeType.KSM)) + a -> a.getNodeType().equals(HddsProtos.NodeType.KSM)) .collect(Collectors.toList()).get(0); InetSocketAddress ksmAddress = new InetSocketAddress(ksmInfo.getHostname(), ksmInfo.getPort(ServicePort.Type.RPC)); @@ -1228,7 +1229,7 @@ public class TestKeySpaceManager { conf.get(OZONE_KSM_ADDRESS_KEY)), ksmAddress); ServiceInfo scmInfo = services.stream().filter( - a -> a.getNodeType().equals(HdslProtos.NodeType.SCM)) + a -> a.getNodeType().equals(HddsProtos.NodeType.SCM)) .collect(Collectors.toList()).get(0); InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(), scmInfo.getPort(ServicePort.Type.RPC)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java index 5425965fcc..8f12768267 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.ksm; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.core.type.TypeReference; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; @@ -29,7 +29,7 @@ import 
org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.ServicePort; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; @@ -46,7 +46,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.ozone.KsmUtils.getKsmAddressForClients; /** @@ -90,14 +90,14 @@ public class TestKeySpaceManagerRestInterface { new TypeReference<List<ServiceInfo>>() {}; List<ServiceInfo> serviceInfos = objectMapper.readValue( serviceListJson, serviceInfoReference); - Map<HdslProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>(); + Map<HddsProtos.NodeType, ServiceInfo> serviceMap = new HashMap<>(); for (ServiceInfo serviceInfo : serviceInfos) { serviceMap.put(serviceInfo.getNodeType(), serviceInfo); } InetSocketAddress ksmAddress = getKsmAddressForClients(conf); - ServiceInfo ksmInfo = serviceMap.get(HdslProtos.NodeType.KSM); + ServiceInfo ksmInfo = serviceMap.get(HddsProtos.NodeType.KSM); Assert.assertEquals(ksmAddress.getHostName(), ksmInfo.getHostname()); Assert.assertEquals(ksmAddress.getPort(), @@ -107,13 +107,13 @@ public class TestKeySpaceManagerRestInterface { InetSocketAddress scmAddress = getScmAddressForClients(conf); - ServiceInfo scmInfo = serviceMap.get(HdslProtos.NodeType.SCM); + ServiceInfo scmInfo = serviceMap.get(HddsProtos.NodeType.SCM); Assert.assertEquals(scmAddress.getHostName(), scmInfo.getHostname()); Assert.assertEquals(scmAddress.getPort(), scmInfo.getPort(ServicePort.Type.RPC)); - ServiceInfo datanodeInfo = serviceMap.get(HdslProtos.NodeType.DATANODE); + ServiceInfo datanodeInfo = serviceMap.get(HddsProtos.NodeType.DATANODE); DataNode datanode = ((MiniOzoneClassicCluster) cluster) .getDataNodes().get(0); Assert.assertEquals(datanode.getDatanodeHostname(), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java index 67f81eb567..01d2b1d1f3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKsmBlockVersioning.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java index fc3c4ab4f2..d895765cd6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestMultipleContainerReadWrite.java @@ -23,7 +23,7 @@ import 
org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.handlers.BucketArgs; import org.apache.hadoop.ozone.web.handlers.KeyArgs; @@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.web.handlers.UserArgs; import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Ignore; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java index fdbc15d1bc..fbaa7c23d0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java @@ -44,7 +44,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights; import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.client.OzoneBucket; import org.apache.hadoop.ozone.web.client.OzoneKey; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java index fdc0f9d42e..020fecdf12 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java @@ -21,11 +21,11 @@ import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java index 581011f503..5fe4f0653b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java @@ -17,23 +17,24 @@ */ package org.apache.hadoop.ozone.scm; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.block.BlockManagerImpl; -import org.apache.hadoop.ozone.scm.cli.SQLCLI; -import org.apache.hadoop.ozone.scm.container.ContainerMapping; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.ozone.scm.node.NodeManager; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; +import org.apache.hadoop.hdds.scm.cli.SQLCLI; +import org.apache.hadoop.hdds.scm.container.ContainerMapping; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -97,8 +98,8 @@ public class TestContainerSQLCli { private HashMap<String, String> blockContainerMap; private final static long DEFAULT_BLOCK_SIZE = 4 * KB; - private static HdslProtos.ReplicationFactor factor; - private static HdslProtos.ReplicationType type; + private static HddsProtos.ReplicationFactor factor; + private static HddsProtos.ReplicationType type; private static final String containerOwner = "OZONE"; @@ -113,11 +114,11 @@ public class TestContainerSQLCli { SCMContainerPlacementCapacity.class, ContainerPlacementPolicy.class); if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){ - factor = HdslProtos.ReplicationFactor.THREE; - type = HdslProtos.ReplicationType.RATIS; + factor = HddsProtos.ReplicationFactor.THREE; + type = HddsProtos.ReplicationType.RATIS; } else { - factor = HdslProtos.ReplicationFactor.ONE; - type = HdslProtos.ReplicationType.STAND_ALONE; + factor = HddsProtos.ReplicationFactor.ONE; + type = HddsProtos.ReplicationType.STAND_ALONE; } cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(2) .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities}) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java index 85e424be3e..2e7f1b37e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java @@ -17,23 +17,23 @@ */ package org.apache.hadoop.ozone.scm; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.ContainerPlacementPolicy; -import org.apache.hadoop.ozone.scm.container.placement.algorithms.SCMContainerPlacementCapacity; -import org.apache.hadoop.scm.ScmConfigKeys; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -87,7 +87,7 @@ public class TestContainerSmallFile { Pipeline pipeline = storageContainerLocationClient.allocateContainer( xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); ContainerProtocolCalls.createContainer(client, traceID); @@ -108,7 +108,7 @@ public class TestContainerSmallFile { Pipeline pipeline = storageContainerLocationClient.allocateContainer( xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); ContainerProtocolCalls.createContainer(client, traceID); @@ -130,7 +130,7 @@ public class TestContainerSmallFile { Pipeline pipeline = storageContainerLocationClient.allocateContainer( xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); XceiverClientSpi client = xceiverClientManager.acquireClient(pipeline); 
ContainerProtocolCalls.createContainer(client, traceID); ContainerProtocolCalls.writeSmallFile(client, containerName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java index dbc165c511..f08c3773c4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java @@ -17,22 +17,23 @@ */ package org.apache.hadoop.ozone.scm; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.ozone.scm.cli.ResultCode; -import org.apache.hadoop.ozone.scm.cli.SCMCLI; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.client.ContainerOperationClient; -import org.apache.hadoop.scm.client.ScmClient; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ResultCode; +import org.apache.hadoop.hdds.scm.cli.SCMCLI; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -44,9 +45,10 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.CLOSED; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN; -import static org.apache.hadoop.ozone.scm.cli.ResultCode.EXECUTION_ERROR; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; + +import static org.apache.hadoop.hdds.scm.cli.ResultCode.EXECUTION_ERROR; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -159,7 +161,7 @@ public class TestSCMCli { containerName = "non-empty-container"; pipeline = containerOperationClient .createContainer(xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); ContainerData cdata = ContainerData .getFromProtBuf(containerOperationClient.readContainer(pipeline), conf); @@ -201,7 +203,7 @@ public 
class TestSCMCli { containerName = "empty-container"; pipeline = containerOperationClient .createContainer(xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); containerOperationClient.closeContainer(pipeline); Assert.assertTrue(containerExist(containerName)); @@ -214,7 +216,7 @@ public class TestSCMCli { // After the container is deleted, // a same name container can now be recreated. containerOperationClient.createContainer(xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); Assert.assertTrue(containerExist(containerName)); // **************************************** @@ -264,7 +266,7 @@ public class TestSCMCli { cname = "ContainerTestInfo1"; Pipeline pipeline = containerOperationClient .createContainer(xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, cname, containerOwner); + HddsProtos.ReplicationFactor.ONE, cname, containerOwner); ContainerData data = ContainerData .getFromProtBuf(containerOperationClient.readContainer(pipeline), conf); @@ -286,7 +288,7 @@ public class TestSCMCli { cname = "ContainerTestInfo2"; pipeline = containerOperationClient .createContainer(xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, cname, containerOwner); + HddsProtos.ReplicationFactor.ONE, cname, containerOwner); data = ContainerData .getFromProtBuf(containerOperationClient.readContainer(pipeline), conf); KeyUtils.getDB(data, conf).put(cname.getBytes(), "someKey".getBytes()); @@ -345,7 +347,7 @@ public class TestSCMCli { for (int index = 0; index < 20; index++) { String containerName = String.format("%s%02d", prefix, index); containerOperationClient.createContainer(xceiverClientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName, containerOwner); } ByteArrayOutputStream out = new ByteArrayOutputStream(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index 5034de0d27..ecc7a948c3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -20,13 +20,14 @@ package org.apache.hadoop.ozone.scm; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hdds.scm.StorageContainerManager; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.ozone.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; +import org.apache.hadoop.hdds.scm.node.NodeManager; import org.junit.BeforeClass; import org.junit.AfterClass; import org.junit.Test; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java index 
6ce6b056d0..d6a5017ee9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java @@ -25,18 +25,20 @@ import static org.junit.Assert.assertEquals; import java.util.UUID; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.StorageContainerManager; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerReport; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdsl.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.ozone.scm.container.placement.metrics.ContainerStat; -import org.apache.hadoop.ozone.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.ozone.scm.node.SCMNodeManager; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; +import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Rule; import org.junit.Test; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java index d8af93ce87..6f3bdf4086 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java @@ -22,15 +22,15 @@ import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.junit.Assert; import org.junit.AfterClass; import 
org.junit.BeforeClass; @@ -40,7 +40,7 @@ import org.junit.rules.ExpectedException; import java.io.IOException; -import static org.apache.hadoop.scm +import static org.apache.hadoop.hdds.scm .ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY; /** @@ -116,7 +116,7 @@ public class TestXceiverClientManager { String containerName1 = "container" + RandomStringUtils.randomNumeric(10); Pipeline pipeline1 = storageContainerLocationClient.allocateContainer( - clientManager.getType(), HdslProtos.ReplicationFactor.ONE, + clientManager.getType(), HddsProtos.ReplicationFactor.ONE, containerName1, containerOwner); XceiverClientSpi client1 = clientManager.acquireClient(pipeline1); Assert.assertEquals(1, client1.getRefcount()); @@ -127,7 +127,7 @@ public class TestXceiverClientManager { Pipeline pipeline2 = storageContainerLocationClient.allocateContainer( clientManager.getType(), - HdslProtos.ReplicationFactor.ONE, containerName2, containerOwner); + HddsProtos.ReplicationFactor.ONE, containerName2, containerOwner); XceiverClientSpi client2 = clientManager.acquireClient(pipeline2); Assert.assertEquals(1, client2.getRefcount()); Assert.assertEquals(containerName2, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java index 9287c2771f..1a7a11e8fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientMetrics.java @@ -28,21 +28,21 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.web.utils.OzoneUtils; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.XceiverClientMetrics; -import org.apache.hadoop.scm.XceiverClientSpi; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientMetrics; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java index 6c468c3d80..0748824420 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java @@ -14,14 +14,14 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.scm.node; +package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.client.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.client.ContainerOperationClient; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; @@ -31,18 +31,19 @@ import java.util.EnumSet; import java.util.concurrent.TimeUnit; import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState.DEAD; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState - .HEALTHY; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeState - .STALE; -import static org.apache.hadoop.scm.ScmConfigKeys + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.INVALID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_HEARTBEAT_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.scm.ScmConfigKeys +import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_STALENODE_INTERVAL; import static org.junit.Assert.assertEquals; @@ -85,9 +86,9 @@ public class TestQueryNode { @Test public void testHealthyNodesCount() throws Exception { - HdslProtos.NodePool pool = scmClient.queryNode( + HddsProtos.NodePool pool = scmClient.queryNode( EnumSet.of(HEALTHY), - HdslProtos.QueryScope.CLUSTER, ""); + HddsProtos.QueryScope.CLUSTER, ""); assertEquals("Expected live nodes", numOfDatanodes, pool.getNodesCount()); } @@ -102,7 +103,7 @@ public class TestQueryNode { 100, 4 * 1000); int nodeCount = scmClient.queryNode(EnumSet.of(STALE), - HdslProtos.QueryScope.CLUSTER, "").getNodesCount(); + HddsProtos.QueryScope.CLUSTER, "").getNodesCount(); assertEquals("Mismatch of expected nodes count", 2, nodeCount); GenericTestUtils.waitFor(() -> @@ -111,12 +112,12 @@ public class TestQueryNode { // Assert that we don't find any stale nodes. 
nodeCount = scmClient.queryNode(EnumSet.of(STALE), - HdslProtos.QueryScope.CLUSTER, "").getNodesCount(); + HddsProtos.QueryScope.CLUSTER, "").getNodesCount(); assertEquals("Mismatch of expected nodes count", 0, nodeCount); // Assert that we find the expected number of dead nodes. nodeCount = scmClient.queryNode(EnumSet.of(DEAD), - HdslProtos.QueryScope.CLUSTER, "").getNodesCount(); + HddsProtos.QueryScope.CLUSTER, "").getNodesCount(); assertEquals("Mismatch of expected nodes count", 2, nodeCount); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java index adfe2509a0..9131f08952 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.log4j.Level; import org.apache.log4j.Logger; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java index d4c03e3d59..ce442375b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.test.GenericTestUtils; import org.apache.log4j.Level; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java index f0648709fa..ffd2003c21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java @@ -35,7 +35,7 @@ import org.junit.rules.ExpectedException; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.web.client.OzoneBucket; import org.apache.hadoop.ozone.web.client.OzoneVolume; import org.apache.hadoop.ozone.web.request.OzoneQuota; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java index 1e42c67923..7dcec2c057 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java @@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.web; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.rest.headers.Header; import org.apache.hadoop.test.GenericTestUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java index d088598534..972e5e0230 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java @@ -21,7 +21,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.web.request.OzoneQuota; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java index 49484f31d3..392a06a7f8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java @@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.MiniOzoneTestHelper; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java index 8db67b2f6c..4fd6e7c38e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java @@ -46,7 +46,7 @@ import io.netty.handler.logging.LoggingHandler; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; 
import org.apache.hadoop.ozone.client.rest.headers.Header; import org.apache.hadoop.security.UserGroupInformation; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java index 5d20ca23a9..96ad2fbf42 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java @@ -23,11 +23,11 @@ import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status; -import org.apache.hadoop.ozone.client.OzoneClientUtils; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.web.request.OzoneQuota; import org.apache.hadoop.ozone.web.utils.OzoneUtils; @@ -378,7 +378,7 @@ public class TestVolume { List<CloseableHttpClient> spyHttpClients = new ArrayList<>(); for (int i = 0; i < 5; i++) { CloseableHttpClient spyHttpClient = Mockito - .spy(OzoneClientUtils.newHttpClient()); + .spy(HddsClientUtils.newHttpClient()); spyHttpClients.add(spyHttpClient); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java index 7a8b31b47f..97f3ee9276 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java @@ -22,7 +22,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.rest.OzoneException; diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java index f998f18578..3128d31e80 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForBlockClients; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress; import static org.apache.hadoop.ozone.OzoneConfigKeys.*; import 
static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS; @@ -41,9 +41,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.web.ObjectStoreApplication; import org.apache.hadoop.ozone.web.handlers.ServiceFilter; import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.protocolPB .ScmBlockLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,10 +52,10 @@ import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler; import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler; diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java index 641ed99f21..b13ae30e6c 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java @@ -31,7 +31,7 @@ import java.util.TimeZone; import java.util.UUID; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.ozone.client.OzoneClientUtils; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.client.io.LengthInputStream; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.client.rest.headers.Header; @@ -81,7 +81,7 @@ public final class OzoneRestUtils { */ public static void verifyResourceName(String resName) throws IllegalArgumentException { - OzoneClientUtils.verifyResourceName(resName); + HddsClientUtils.verifyResourceName(resName); } /** diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java index fab43429a9..2128b806cf 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreRestPlugin.java @@ -24,9 +24,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeServicePlugin; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.HdslDatanodeService; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import 
org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer; import org.apache.hadoop.ozone.web.utils.OzoneUtils; @@ -82,11 +82,11 @@ public class ObjectStoreRestPlugin implements DataNodeServicePlugin { public static DatanodeDetails getDatanodeDetails(DataNode dataNode) { for (ServicePlugin plugin : dataNode.getPlugins()) { - if (plugin instanceof HdslDatanodeService) { - return ((HdslDatanodeService) plugin).getDatanodeDetails(); + if (plugin instanceof HddsDatanodeService) { + return ((HddsDatanodeService) plugin).getDatanodeDetails(); } } - throw new RuntimeException("Not able to find HdslDatanodeService in the" + + throw new RuntimeException("Not able to find HddsDatanodeService in the" + " list of plugins loaded by DataNode."); } diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java index 7098572d63..f86f247ac7 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.web.handlers; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.localstorage.LocalStorageHandler; diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java index 2a9de40068..e3814e141a 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; import org.apache.hadoop.hdfs.server.datanode.web .RestCsrfPreventionFilterHandler; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.security.http.RestCsrfPreventionFilter; import io.netty.bootstrap.ChannelFactory; @@ -54,15 +54,17 @@ import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.stream.ChunkedWriteHandler; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import static org.apache.hadoop.scm.ScmConfigKeys - .HDSL_REST_CSRF_ENABLED_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.HDSL_REST_CSRF_ENABLED_KEY; -import static org.apache.hadoop.scm.ScmConfigKeys - .HDSL_REST_HTTP_ADDRESS_DEFAULT; -import static org.apache.hadoop.scm.ScmConfigKeys.HDSL_REST_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .HDDS_REST_CSRF_ENABLED_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .HDDS_REST_CSRF_ENABLED_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys + .HDDS_REST_HTTP_ADDRESS_DEFAULT; +import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys + .HDDS_REST_HTTP_ADDRESS_KEY; /** - * Netty based web server for Hdsl rest api server. + * Netty based web server for Hdds rest api server. * <p> * Based on the Datanode http serer. */ @@ -110,11 +112,11 @@ public class ObjectStoreRestHttpServer implements Closeable { }); this.httpServer.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, - conf.getInt(ScmConfigKeys.HDSL_REST_NETTY_HIGH_WATERMARK, - ScmConfigKeys.HDSL_REST_NETTY_HIGH_WATERMARK_DEFAULT)); + conf.getInt(ScmConfigKeys.HDDS_REST_NETTY_HIGH_WATERMARK, + ScmConfigKeys.HDDS_REST_NETTY_HIGH_WATERMARK_DEFAULT)); this.httpServer.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, - conf.getInt(ScmConfigKeys.HDSL_REST_NETTY_LOW_WATERMARK, - ScmConfigKeys.HDSL_REST_NETTY_LOW_WATERMARK_DEFAULT)); + conf.getInt(ScmConfigKeys.HDDS_REST_NETTY_LOW_WATERMARK, + ScmConfigKeys.HDDS_REST_NETTY_LOW_WATERMARK_DEFAULT)); if (externalHttpChannel == null) { httpServer.channel(NioServerSocketChannel.class); @@ -141,8 +143,8 @@ public class ObjectStoreRestHttpServer implements Closeable { if (httpServer != null) { InetSocketAddress infoAddr = NetUtils.createSocketAddr( - conf.getTrimmed(HDSL_REST_HTTP_ADDRESS_KEY, - HDSL_REST_HTTP_ADDRESS_DEFAULT)); + conf.getTrimmed(HDDS_REST_HTTP_ADDRESS_KEY, + HDDS_REST_HTTP_ADDRESS_DEFAULT)); ChannelFuture f = httpServer.bind(infoAddr); try { @@ -156,7 +158,7 @@ public class ObjectStoreRestHttpServer implements Closeable { } } httpAddress = (InetSocketAddress) f.channel().localAddress(); - LOG.info("Listening HDSL REST traffic on " + httpAddress); + LOG.info("Listening HDDS REST traffic on " + httpAddress); } } @@ -183,8 +185,8 @@ public class ObjectStoreRestHttpServer implements Closeable { */ private static RestCsrfPreventionFilter createRestCsrfPreventionFilter( Configuration conf) { - if (!conf.getBoolean(HDSL_REST_CSRF_ENABLED_KEY, - HDSL_REST_CSRF_ENABLED_DEFAULT)) { + if (!conf.getBoolean(HDDS_REST_CSRF_ENABLED_KEY, + HDDS_REST_CSRF_ENABLED_DEFAULT)) { return null; } String restCsrfClassName = RestCsrfPreventionFilter.class.getName(); diff --git a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java index c90cc844e3..0f4a85628b 100644 --- a/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java +++ b/hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java @@ -29,8 +29,8 @@ import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; import org.apache.hadoop.ozone.ksm.protocolPB .KeySpaceManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneConsts.Versioning; import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; @@ -41,9 +41,9 @@ import org.apache.hadoop.ozone.protocolPB.KSMPBHelper; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.web.request.OzoneQuota; -import org.apache.hadoop.scm.ScmConfigKeys; -import 
org.apache.hadoop.scm.XceiverClientManager; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.web.handlers.BucketArgs; @@ -84,8 +84,8 @@ public final class DistributedStorageHandler implements StorageHandler { private final OzoneAcl.OzoneACLRights groupRights; private int chunkSize; private final boolean useRatis; - private final HdslProtos.ReplicationType type; - private final HdslProtos.ReplicationFactor factor; + private final HddsProtos.ReplicationType type; + private final HddsProtos.ReplicationFactor factor; /** * Creates a new DistributedStorageHandler. @@ -107,11 +107,11 @@ public final class DistributedStorageHandler implements StorageHandler { ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); if(useRatis) { - type = HdslProtos.ReplicationType.RATIS; - factor = HdslProtos.ReplicationFactor.THREE; + type = HddsProtos.ReplicationType.RATIS; + factor = HddsProtos.ReplicationFactor.THREE; } else { - type = HdslProtos.ReplicationType.STAND_ALONE; - factor = HdslProtos.ReplicationFactor.ONE; + type = HddsProtos.ReplicationType.STAND_ALONE; + factor = HddsProtos.ReplicationFactor.ONE; } chunkSize = conf.getInt(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY, diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index b0bc73dc96..9b8f42c143 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -101,7 +101,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactItems> <artifactItem> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <outputDirectory>${project.build.directory}/</outputDirectory> <includes>webapps/static/**/*.*</includes> </artifactItem> diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java index 2e1fbd37c9..bf223324ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMXBean.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hdsl.server.ServiceRuntimeInfo; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfo; /** * This is the JMX management interface for ksm information. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java index f241de6769..fa0eaa2acb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMMetadataManagerImpl.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.ksm.exceptions.KSMException; import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes; @@ -65,7 +65,7 @@ import static org.apache.hadoop.ozone.ksm.KSMConfigKeys .OZONE_KSM_DB_CACHE_SIZE_DEFAULT; import static org.apache.hadoop.ozone.ksm.KSMConfigKeys .OZONE_KSM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; /** * KSM metadata manager interface. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java index d444e5c38d..015bed6986 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KSMStorage.java @@ -21,12 +21,12 @@ import java.io.IOException; import java.util.Properties; import java.util.UUID; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.NodeType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; -import static org.apache.hadoop.hdsl.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; /** * KSMStorage is responsible for management of the StorageDirectories used by diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java index 3beaed4913..14fb69c35f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyDeletingService.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.BackgroundTask; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java index 95f4c08d6f..70ba178ba3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeyManagerImpl.java @@ -24,15 +24,15 @@ import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.ksm.exceptions.KSMException; import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes; import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup; import org.apache.hadoop.ozone.ksm.helpers.OpenKeySession; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.KeyInfo; -import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.BatchOperation; @@ -71,10 +71,10 @@ import static org.apache.hadoop.ozone .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone .OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_IN_MB; -import org.apache.hadoop.hdsl.protocol - .proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol - .proto.HdslProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol + .proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol + .proto.HddsProtos.ReplicationFactor; /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java index fdec56c1dd..39ef396378 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManager.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.hdsl.server.ServiceRuntimeInfoImpl; +import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.ozone.common.Storage.StorageState; import org.apache.hadoop.ozone.ksm.exceptions.KSMException; import org.apache.hadoop.ozone.ksm.helpers.KsmBucketArgs; @@ -42,32 +42,32 @@ import org.apache.hadoop.ozone.ksm.exceptions.KSMException.ResultCodes; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos .ServicePort; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.OzoneAclInfo; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.protocolPB 
.KeySpaceManagerProtocolServerSideTranslatorPB; -import org.apache.hadoop.scm.ScmInfo; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.protocolPB .ScmBlockLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.ScmBlockLocationProtocolPB; -import org.apache.hadoop.scm.protocolPB +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB .StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.GenericOptionsParser; import org.apache.hadoop.util.StringUtils; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForBlockClients; -import static org.apache.hadoop.hdsl.HdslUtils.getScmAddressForClients; -import static org.apache.hadoop.hdsl.HdslUtils.isHdslEnabled; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled; import static org.apache.hadoop.ozone.KsmUtils.getKsmAddress; -import static org.apache.hadoop.hdsl.server.ServerUtils +import static org.apache.hadoop.hdds.server.ServerUtils .updateRPCListenAddress; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -92,7 +92,7 @@ import static org.apache.hadoop.ozone.ksm.KSMConfigKeys import static org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.KeySpaceManagerService .newReflectiveBlockingService; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos .NodeState.HEALTHY; import static org.apache.hadoop.util.ExitUtil.terminate; @@ -338,7 +338,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl public static KeySpaceManager createKSM(String[] argv, OzoneConfiguration conf) throws IOException { - if (!isHdslEnabled(conf)) { + if (!isHddsEnabled(conf)) { System.err.println("KSM cannot be started in secure mode or when " + OZONE_ENABLED + " is set to false"); System.exit(1); @@ -840,7 +840,7 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl // When we implement multi-home this call has to be handled properly. 
List<ServiceInfo> services = new ArrayList<>(); ServiceInfo.Builder ksmServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HdslProtos.NodeType.KSM) + .setNodeType(HddsProtos.NodeType.KSM) .setHostname(ksmRpcAddress.getHostName()) .addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.RPC) @@ -865,22 +865,22 @@ public final class KeySpaceManager extends ServiceRuntimeInfoImpl InetSocketAddress scmAddr = getScmAddressForClients( configuration); ServiceInfo.Builder scmServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HdslProtos.NodeType.SCM) + .setNodeType(HddsProtos.NodeType.SCM) .setHostname(scmAddr.getHostName()) .addServicePort(ServicePort.newBuilder() .setType(ServicePort.Type.RPC) .setValue(scmAddr.getPort()).build()); services.add(scmServiceInfoBuilder.build()); - List<HdslProtos.Node> nodes = scmContainerClient.queryNode( - EnumSet.of(HEALTHY), HdslProtos.QueryScope.CLUSTER, "") + List<HddsProtos.Node> nodes = scmContainerClient.queryNode( + EnumSet.of(HEALTHY), HddsProtos.QueryScope.CLUSTER, "") .getNodesList(); - for (HdslProtos.Node node : nodes) { - HdslProtos.DatanodeDetailsProto datanode = node.getNodeID(); + for (HddsProtos.Node node : nodes) { + HddsProtos.DatanodeDetailsProto datanode = node.getNodeID(); ServiceInfo.Builder dnServiceInfoBuilder = ServiceInfo.newBuilder() - .setNodeType(HdslProtos.NodeType.DATANODE) + .setNodeType(HddsProtos.NodeType.DATANODE) .setHostname(datanode.getHostName()); dnServiceInfoBuilder.addServicePort(ServicePort.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java index 3cf43608f3..478804b32c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/KeySpaceManagerHttpServer.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdsl.server.BaseHttpServer; +import org.apache.hadoop.hdds.server.BaseHttpServer; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java index 7f60bf83c8..7a2d7cc9ca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/OpenKeyCleanupService.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; -import org.apache.hadoop.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.utils.BackgroundService; import org.apache.hadoop.utils.BackgroundTask; import org.apache.hadoop.utils.BackgroundTaskQueue; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java index 4561c435eb..cc2f78aa34 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManagerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.ksm; import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.ksm.helpers.KsmVolumeArgs; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.ksm.exceptions.KSMException; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.OzoneAclInfo; @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.VolumeList; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.util.Time; import org.apache.hadoop.utils.BatchOperation; import org.slf4j.Logger; @@ -137,9 +137,9 @@ public class VolumeManagerImpl implements VolumeManager { BatchOperation batch = new BatchOperation(); // Write the vol info - List<HdslProtos.KeyValue> metadataList = new LinkedList<>(); + List<HddsProtos.KeyValue> metadataList = new LinkedList<>(); for (Map.Entry<String, String> entry : args.getKeyValueMap().entrySet()) { - metadataList.add(HdslProtos.KeyValue.newBuilder() + metadataList.add(HddsProtos.KeyValue.newBuilder() .setKey(entry.getKey()).setValue(entry.getValue()).build()); } List<OzoneAclInfo> aclList = args.getAclMap().ozoneAclGetProtobuf(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java index d4bc10f455..02a4120d77 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/KeySpaceManagerProtocolServerSideTranslatorPB.java @@ -98,7 +98,7 @@ import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.ServiceListRequest; import org.apache.hadoop.ozone.protocol.proto .KeySpaceManagerProtocolProtos.ServiceListResponse; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -322,9 +322,9 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements LocateKeyResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - HdslProtos.ReplicationType type = + HddsProtos.ReplicationType type = keyArgs.hasType()? keyArgs.getType() : null; - HdslProtos.ReplicationFactor factor = + HddsProtos.ReplicationFactor factor = keyArgs.hasFactor()? keyArgs.getFactor() : null; KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) @@ -473,9 +473,9 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements CommitKeyResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - HdslProtos.ReplicationType type = + HddsProtos.ReplicationType type = keyArgs.hasType()? keyArgs.getType() : null; - HdslProtos.ReplicationFactor factor = + HddsProtos.ReplicationFactor factor = keyArgs.hasFactor()? 
keyArgs.getFactor() : null; KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) @@ -501,9 +501,9 @@ public class KeySpaceManagerProtocolServerSideTranslatorPB implements AllocateBlockResponse.newBuilder(); try { KeyArgs keyArgs = request.getKeyArgs(); - HdslProtos.ReplicationType type = + HddsProtos.ReplicationType type = keyArgs.hasType()? keyArgs.getType() : null; - HdslProtos.ReplicationFactor factor = + HddsProtos.ReplicationFactor factor = keyArgs.hasFactor()? keyArgs.getFactor() : null; KsmKeyArgs ksmKeyArgs = new KsmKeyArgs.Builder() .setVolumeName(keyArgs.getVolumeName()) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java index 82b3602f98..8553255d19 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java @@ -26,7 +26,7 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.rest.OzoneException; import org.apache.hadoop.ozone.web.ozShell.bucket.UpdateBucketHandler; import org.apache.hadoop.ozone.web.ozShell.keys.DeleteKeyHandler; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java index 7f1b8ca0ab..de4a85ac46 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestChunkStreams.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.ksm; import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.ozone.client.io.ChunkGroupInputStream; import org.apache.hadoop.ozone.client.io.ChunkGroupOutputStream; -import org.apache.hadoop.scm.storage.ChunkInputStream; +import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java index 5e24167eb6..b263df56a1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerHttpServer.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpConfig.Policy; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 6878f3c93c..40c50becf8 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -56,32 +56,32 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> </dependency> <dependency> 
<groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-client</artifactId> + <artifactId>hadoop-hdds-client</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-tools</artifactId> + <artifactId>hadoop-hdds-tools</artifactId> <scope>provided</scope> </dependency> <dependency> @@ -98,13 +98,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> <scope>test</scope> <type>test-jar</type> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <scope>test</scope> <type>test-jar</type> </dependency> diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 591960d62a..573afcff43 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -29,7 +29,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <packaging>jar</packaging> <properties> - <hadoop.component>hdsl</hadoop.component> + <hadoop.component>hdds</hadoop.component> <is.hadoop.component>true</is.hadoop.component> </properties> @@ -51,11 +51,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> </dependency> <dependency> <groupId>org.openjdk.jmh</groupId> diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index adcff4f7b3..d933e6f2e7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -34,8 +34,11 @@ import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.time.DurationFormatUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hdds.client.OzoneQuota; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.*; import org.apache.hadoop.ozone.client.io.OzoneInputStream; diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java index c20fdeb202..8a6e6347ed 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkContainerStateMap.java @@ -19,12 +19,14 @@ package org.apache.hadoop.ozone.genesis; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; -import org.apache.hadoop.ozone.scm.container.ContainerStates.ContainerStateMap; -import org.apache.hadoop.ozone.scm.exceptions.SCMException; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.util.Time; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Level; @@ -40,10 +42,10 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.OPEN; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState.CLOSED; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import static org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @State(Scope.Thread) public class BenchMarkContainerStateMap { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java index dfd33f7bbb..93b22f9722 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java @@ -6,16 +6,16 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl; import 
org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl; import org.apache.hadoop.ozone.container.common.impl.Dispatcher; import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; -import org.apache.hadoop.scm.container.common.helpers.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.PipelineChannel; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineChannel; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Level; import org.openjdk.jmh.annotations.Scope; @@ -33,18 +33,18 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.LifeCycleState; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.CreateContainerRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ReadChunkRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.PutKeyRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.GetKeyRequestProto; -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos.ContainerData; - -import org.apache.hadoop.hdsl.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationType; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.CreateContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ReadChunkRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.WriteChunkRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.PutKeyRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.GetKeyRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ContainerData; + +import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @State(Scope.Benchmark) public class BenchMarkDatanodeDispatcher { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java index 7c3ef3c05b..e77cb2276c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java @@ -24,7 +24,7 @@ import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; /** - * Main class that executes a set of HDSL/Ozone benchmarks. + * Main class that executes a set of HDDS/Ozone benchmarks. * We purposefully don't use the runner and tools classes from Hadoop. * There are some name collisions with OpenJDK JMH package. 
* <p> diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java index 78b579d994..199ead58ea 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java @@ -3,7 +3,7 @@ package org.apache.hadoop.ozone.genesis; import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java index bd0c2ccfe1..c9b44e318d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli; +package org.apache.hadoop.hdds.scm.cli; import com.google.common.base.Preconditions; import org.apache.commons.cli.BasicParser; @@ -28,16 +28,16 @@ import org.apache.commons.cli.ParseException; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtilClient; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; -import org.apache.hadoop.hdsl.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeInfo; import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.VolumeList; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos; -import org.apache.hadoop.hdsl.protocol.proto.HdslProtos.Pipeline; -import org.apache.hadoop.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.Pipeline; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.utils.MetadataStore; @@ -503,7 +503,7 @@ public class SQLCLI extends Configured implements Tool { String containerName = new String(key, encoding); ContainerInfo containerInfo = null; containerInfo = ContainerInfo.fromProtobuf( - HdslProtos.SCMContainerInfo.PARSER.parseFrom(value)); + HddsProtos.SCMContainerInfo.PARSER.parseFrom(value)); Preconditions.checkNotNull(containerInfo); try { //TODO: include container state to sqllite schema @@ -533,7 +533,7 @@ public class SQLCLI extends Configured implements Tool { pipeline.getPipelineChannel().getLeaderID()); executeSQL(conn, insertContainerInfo); - for (HdslProtos.DatanodeDetailsProto dd : + for (HddsProtos.DatanodeDetailsProto dd : 
pipeline.getPipelineChannel().getMembersList()) { String uuid = dd.getUuid(); if (!uuidChecked.contains(uuid)) { @@ -629,7 +629,7 @@ public class SQLCLI extends Configured implements Tool { dbStore.iterate(null, (key, value) -> { DatanodeDetails nodeId = DatanodeDetails - .getFromProtoBuf(HdslProtos.DatanodeDetailsProto + .getFromProtoBuf(HddsProtos.DatanodeDetailsProto .PARSER.parseFrom(key)); String blockPool = DFSUtil.bytes2String(value); try { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java index 4c38ae0bd2..fbef0d2ec6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/scm/cli/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.scm.cli; +package org.apache.hadoop.hdds.scm.cli; /** * Command line helpers for scm management. diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e5cd338bbe..6e94e01056 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -569,18 +569,18 @@ <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-client</artifactId> + <artifactId>hadoop-hdds-client</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-tools</artifactId> + <artifactId>hadoop-hdds-tools</artifactId> <version>${project.version}</version> </dependency> <dependency> @@ -610,31 +610,31 @@ <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-framework</artifactId> + <artifactId>hadoop-hdds-server-framework</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-container-service</artifactId> + <artifactId>hadoop-hdds-container-service</artifactId> <version>${project.version}</version> <type>test-jar</type> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-server-scm</artifactId> + <artifactId>hadoop-hdds-server-scm</artifactId> <type>test-jar</type> <version>${project.version}</version> </dependency> diff --git a/hadoop-tools/hadoop-ozone/pom.xml b/hadoop-tools/hadoop-ozone/pom.xml index a58b8755a9..2464c32c1e 100644 --- a/hadoop-tools/hadoop-ozone/pom.xml +++ b/hadoop-tools/hadoop-ozone/pom.xml @@ -82,7 +82,7 @@ </dependency> <dependency> <groupId>org.apache.hadoop</groupId> - <artifactId>hadoop-hdsl-common</artifactId> + <artifactId>hadoop-hdds-common</artifactId> <scope>provided</scope> </dependency> <dependency> diff --git a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java 
b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index 081d50d331..c2a2fe2c06 100644 --- a/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-tools/hadoop-ozone/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -50,8 +50,8 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.ReplicationFactor; -import org.apache.hadoop.ozone.client.ReplicationType; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.http.client.utils.URIBuilder; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index 848ddff02a..6be9f919cc 100644 --- a/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler; -import org.apache.hadoop.hdsl.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; diff --git a/hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 85120c38dd..97ec3f4614 100644 --- a/hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.web.handlers.VolumeArgs; import org.apache.hadoop.ozone.web.interfaces.StorageHandler; import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.ozone.ksm.KSMConfigKeys; -import org.apache.hadoop.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.Assert; import java.io.IOException; diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 83ded3ee5e..21cc7cef8c 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -194,7 +194,7 @@ </profile> <profile> - <id>hdsl</id> + <id>hdds</id> <activation> <activeByDefault>false</activeByDefault> </activation> diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index 7d99762f18..f421e580ba 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -69,7 +69,7 @@ </build> <profiles> <profile> - <id>hdsl</id> + <id>hdds</id> <activation> <activeByDefault>false</activeByDefault> </activation> @@ -386,7 +386,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs <exclude>**/build/**</exclude> <exclude>**/patchprocess/**</exclude> <exclude>**/*.js</exclude> - 
<exclude>hadoop-hdsl/**/nvd3-*</exclude> + <exclude>hadoop-hdds/**/nvd3-*</exclude> </excludes> </configuration> </plugin> @@ -589,7 +589,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs </build> </profile> <profile> - <id>hdsl-src</id> + <id>hdds-src</id> <activation> <activeByDefault>false</activeByDefault> </activation> @@ -609,12 +609,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs <configuration> <appendAssemblyId>false</appendAssemblyId> <attach>false</attach> - <finalName>hadoop-${project.version}-src-with-hdsl</finalName> + <finalName>hadoop-${project.version}-src-with-hdds</finalName> <outputDirectory>hadoop-dist/target</outputDirectory> <!-- Not using descriptorRef and hadoop-assembly dependency --> <!-- to avoid making hadoop-main to depend on a module --> <descriptors> - <descriptor>hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdsl.xml</descriptor> + <descriptor>hadoop-assemblies/src/main/resources/assemblies/hadoop-src-with-hdds.xml</descriptor> </descriptors> </configuration> </execution> @@ -634,7 +634,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs <configuration> <target> <echo/> - <echo>Hadoop source tar (including HDSL) available at: ${basedir}/hadoop-dist/target/hadoop-${project.version}-src-with-hdsl.tar.gz</echo> + <echo>Hadoop source tar (including HDDS) available at: ${basedir}/hadoop-dist/target/hadoop-${project.version}-src-with-hdds.tar.gz</echo> <echo/> </target> </configuration> @@ -741,14 +741,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs </build> </profile> <profile> - <id>hdsl</id> + <id>hdds</id> <activation> <activeByDefault>false</activeByDefault> </activation> <modules> <module>hadoop-ozone</module> <module>hadoop-cblock</module> - <module>hadoop-hdsl</module> + <module>hadoop-hdds</module> <module>hadoop-ozone/acceptance-test</module> </modules> </profile> |
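The hunks above are representative of the mechanical rename applied across the tree: Maven artifacts move from hadoop-hdsl-* to hadoop-hdds-*, Java packages move from org.apache.hadoop.hdsl and org.apache.hadoop.scm to org.apache.hadoop.hdds and org.apache.hadoop.hdds.scm, and the generated protobuf wrapper HdslProtos becomes HddsProtos. A minimal sketch of what caller code looks like after this patch (class and package names are taken from the imports in the diff; the wrapping example class, its main method, and the RATIS/THREE enum values are illustrative assumptions, not part of the commit):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;

    public class RenamedImportsExample {
      public static void main(String[] args) {
        // OzoneConfiguration now lives under org.apache.hadoop.hdds.conf
        // (previously org.apache.hadoop.hdsl.conf).
        OzoneConfiguration conf = new OzoneConfiguration();

        // Replication settings come from HddsProtos (previously HdslProtos).
        // RATIS and THREE are assumed enum values, used here only for illustration.
        ReplicationType type = ReplicationType.RATIS;
        ReplicationFactor factor = ReplicationFactor.THREE;

        System.out.println("replication=" + type + "/" + factor
            + ", config entries=" + conf.size());
      }
    }

Build files follow the same pattern: every <artifactId>hadoop-hdsl-*</artifactId> dependency becomes hadoop-hdds-*, and the Maven profiles hdsl and hdsl-src are renamed to hdds and hdds-src, so builds that previously activated the optional modules with the hdsl profile would now use the hdds profile instead.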