author    Mukul Kumar Singh <msingh@apache.org>    2018-04-12 13:46:52 +0530
committer Owen O'Malley <omalley@apache.org>      2018-04-26 05:36:04 -0700
commit    8cc13ff962dd9da52334b28952f74850f96b9857 (patch)
tree      2a55aad36dd82c0a4ff4c4baea54f98a786dbb73
parent    ec6c8742e58809a649752692252555ffcfa70801 (diff)
HDFS-13425. Ozone: Clean-up of ozone related change from hadoop-common-project. Contributed by Lokesh Jain.
-rw-r--r--  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java                              |   9 -
-rw-r--r--  hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java                                    | 114 -
-rw-r--r--  hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java |  11 +-
3 files changed, 10 insertions(+), 124 deletions(-)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index e099c8fecb..db5a56730d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.util;
 
-import java.text.ParseException;
 import java.text.SimpleDateFormat;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -83,12 +82,4 @@ public final class Time {
   public static String formatTime(long millis) {
     return DATE_FORMAT.get().format(millis);
   }
-
-  /**
-   * Convert time in human readable format to millisecond.
-   * @return time in milliseconds
-   */
-  public static long formatDate(String date) throws ParseException {
-    return DATE_FORMAT.get().parse(date).getTime();
-  }
 }
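
The removed Time.formatDate(String) was the inverse of the formatTime(long)
method kept above: it parsed a formatted date string back to epoch
milliseconds through the same thread-local DATE_FORMAT. For callers that
still need the conversion, a minimal stand-alone sketch follows; the
"yyyy-MM-dd HH:mm:ss,SSSZ" pattern is an assumption, since this hunk does
not show how DATE_FORMAT is initialized, and DateParseSketch is an
illustrative name.

    import java.text.ParseException;
    import java.text.SimpleDateFormat;

    public final class DateParseSketch {
      // SimpleDateFormat is not thread-safe, hence the thread-local,
      // mirroring the DATE_FORMAT field in Time.java.
      // Assumed pattern; match it to whatever produced the string.
      private static final ThreadLocal<SimpleDateFormat> FORMAT =
          ThreadLocal.withInitial(
              () -> new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSSZ"));

      /** Convert a formatted date string back to epoch milliseconds. */
      public static long parseMillis(String date) throws ParseException {
        return FORMAT.get().parse(date).getTime();
      }

      private DateParseSketch() {
      }
    }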
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java
deleted file mode 100644
index f53f770fcc..0000000000
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/TestArchive.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Iterator;
-import java.util.Random;
-import java.util.zip.Adler32;
-import java.util.zip.Checksum;
-
-/**
- * Test archive creation and unpacking.
- */
-public class TestArchive {
-  private static final int DIR_COUNT = 10;
-  private static final int SUB_DIR_COUNT = 3;
-  private static final int FILE_COUNT = 10;
-  private long checksumWrite = 0L;
-  private long checksumRead = 0L;
-  private long tmp = 0L;
-  private Checksum crc = new Adler32();
-
-  @Rule
-  public TemporaryFolder folder = new TemporaryFolder();
-
-  @Rule
-  public TemporaryFolder outputFolder = new TemporaryFolder();
-
-
-  @Before
-  public void setUp() throws Exception {
-    Random r = new Random();
-    final int megaByte = 1024 * 1024;
-
-    for (int x = 0; x < DIR_COUNT; x++) {
-      File subdir = folder.newFolder(String.format("dir%d", x));
-      for (int y = 0; y < SUB_DIR_COUNT; y++) {
-        File targetDir = new File(subdir.getPath().concat(File.separator)
-            .concat(String.format("subdir%d%d", x, y)));
-        if(!targetDir.mkdirs()) {
-          throw new IOException("Failed to create subdirectory. " +
-              targetDir.toString());
-        }
-        for (int z = 0; z < FILE_COUNT; z++) {
-          Path temp = Paths.get(targetDir.getPath().concat(File.separator)
-              .concat(String.format("File%d.txt", z)));
-          byte[] buf = RandomUtils.nextBytes(r.nextInt(megaByte));
-          Files.write(temp, buf);
-          crc.reset();
-          crc.update(buf, 0, buf.length);
-          tmp = crc.getValue();
-          checksumWrite +=tmp;
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testArchive() throws Exception {
-    File archiveFile = new File(outputFolder.getRoot() + File.separator
-        + "test.container.zip");
-    long zipCheckSum = FileUtil.zip(folder.getRoot(), archiveFile);
-    Assert.assertTrue(zipCheckSum > 0);
-    File decomp = new File(outputFolder.getRoot() + File.separator +
-        "decompress");
-    if (!decomp.exists() && !decomp.mkdirs()) {
-      throw new IOException("Unable to create the destination directory. " +
-          decomp.getPath());
-    }
-
-    FileUtil.unZip(archiveFile, decomp);
-    String[] patterns = {"txt"};
-    Iterator<File> iter = FileUtils.iterateFiles(decomp, patterns, true);
-    int count = 0;
-    while (iter.hasNext()) {
-      count++;
-      byte[] buf = Files.readAllBytes(iter.next().toPath());
-      crc.reset();
-      crc.update(buf, 0, buf.length);
-      tmp = crc.getValue();
-      checksumRead += tmp;
-    }
-    Assert.assertEquals(DIR_COUNT * SUB_DIR_COUNT * FILE_COUNT, count);
-    Assert.assertEquals(checksumWrite, checksumRead);
-  }
-}
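
The deleted test round-tripped a randomly generated directory tree through
FileUtil.zip and FileUtil.unZip, comparing the sum of per-file Adler32
checksums taken before archiving with the sum taken after extraction
(summing makes the comparison independent of traversal order). The core
verification idea, as a self-contained sketch using java.nio directory
walking instead of the Hadoop helpers (ChecksumSketch and checksumTree are
illustrative names):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;
    import java.util.zip.Adler32;
    import java.util.zip.Checksum;

    public final class ChecksumSketch {
      /**
       * Sum per-file Adler32 values under root. Equal sums computed on the
       * source tree and on the unzipped copy imply the file contents
       * survived the round trip.
       */
      public static long checksumTree(Path root) throws IOException {
        Checksum crc = new Adler32();
        long sum = 0L;
        try (Stream<Path> walk = Files.walk(root)) {
          List<Path> files = walk.filter(Files::isRegularFile)
              .collect(Collectors.toList());
          for (Path p : files) {
            byte[] buf = Files.readAllBytes(p);
            crc.reset();
            crc.update(buf, 0, buf.length);
            sum += crc.getValue();
          }
        }
        return sum;
      }

      private ChecksumSketch() {
      }
    }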
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
index 52321eee4a..c14303fc8e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerSupervisor.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+import org.apache.hadoop.util.concurrent.HadoopThreadPoolExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,6 +40,8 @@ import java.util.List;
 import java.util.PriorityQueue;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -129,7 +132,7 @@ public class ContainerSupervisor implements Closeable {
     this.poolQueue = new PriorityQueue<>();
     this.runnable = new AtomicBoolean(true);
     this.threadFaultCount = new AtomicInteger(0);
-    this.executorService = HadoopExecutors.newCachedThreadPool(
+    this.executorService = newCachedThreadPool(
         new ThreadFactoryBuilder().setDaemon(true)
             .setNameFormat("Container Reports Processing Thread - %d")
             .build(), maxContainerReportThreads);
@@ -139,6 +142,12 @@
     initPoolProcessThread();
   }
 
+  private ExecutorService newCachedThreadPool(ThreadFactory threadFactory,
+      int maxThreads) {
+    return new HadoopThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
+        new LinkedBlockingQueue<>(), threadFactory);
+  }
+
   /**
    * Returns the number of pools that are under process right now.
    * @return int - Number of pools that are in process.
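
The new private newCachedThreadPool keeps the pool construction local to
ContainerSupervisor instead of relying on an extra
HadoopExecutors.newCachedThreadPool(ThreadFactory, int) overload in
hadoop-common, in line with this clean-up. One JDK detail worth noting when
reading it: a ThreadPoolExecutor (which HadoopThreadPoolExecutor extends)
only starts threads beyond its core size when the work queue rejects an
offer, so with a core size of 0 and an unbounded LinkedBlockingQueue the
pool keeps at most one worker busy regardless of maxThreads; a true cached
pool pairs a SynchronousQueue with a large maximum instead. A sketch of
both constructions against the plain JDK executor (PoolSketch and its
method names are illustrative):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class PoolSketch {
      /**
       * Same shape as the helper above: tasks queue without bound and a
       * single worker drains them, because the unbounded queue accepts
       * every offer and the pool never grows past its core size of 0
       * (the executor only ensures one thread exists to drain the queue).
       */
      static ExecutorService queuedPool(ThreadFactory tf, int maxThreads) {
        return new ThreadPoolExecutor(0, maxThreads, 60L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>(), tf);
      }

      /**
       * The construction Executors.newCachedThreadPool uses: direct
       * hand-off, so each submitted task either reuses an idle worker or
       * forces a new one, and the pool grows toward its maximum.
       */
      static ExecutorService cachedPool(ThreadFactory tf) {
        return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L,
            TimeUnit.SECONDS, new SynchronousQueue<>(), tf);
      }

      private PoolSketch() {
      }
    }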