Diffstat (limited to 'bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java')
-rw-r--r-- bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java | 396
1 file changed, 198 insertions, 198 deletions
diff --git a/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java
index f54b7e5a..8139eff0 100644
--- a/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java
+++ b/bigtop-tests/smoke-tests/odpi-runtime/src/test/java/org/odpi/specs/runtime/hive/TestThrift.java
@@ -45,207 +45,207 @@ import java.util.Random;
public class TestThrift {
- private static final Log LOG = LogFactory.getLog(TestThrift.class.getName());
+ private static final Log LOG = LogFactory.getLog(TestThrift.class.getName());
+
+ private static IMetaStoreClient client = null;
+ private static HiveConf conf;
+
+ private Random rand;
+
+ @BeforeClass
+ public static void connect() throws MetaException {
+ if (JdbcConnector.testActive(JdbcConnector.TEST_THRIFT, "Test Thrift ")) {
+ String url = JdbcConnector.getProperty(JdbcConnector.METASTORE_URL, "Thrift metastore URL");
+ conf = new HiveConf();
+ conf.setVar(HiveConf.ConfVars.METASTOREURIS, url);
+ LOG.info("Set to test against metastore at " + url);
+ client = new HiveMetaStoreClient(conf);
+ }
+ }
+
+ @Before
+ public void checkIfActive() {
+ Assume.assumeTrue(JdbcConnector.testActive(JdbcConnector.TEST_THRIFT, "Test Thrift "));
+ rand = new Random();
+ }
+
+ @Test
+ public void db() throws TException {
+ final String dbName = "bigtop_thrift_db_" + rand.nextInt(Integer.MAX_VALUE);
+
+ Database db = new Database(dbName, "a db", null, new HashMap<String, String>());
+ client.createDatabase(db);
+ db = client.getDatabase(dbName);
+ Assert.assertNotNull(db);
+ db = new Database(db);
+ db.getParameters().put("a", "b");
+ client.alterDatabase(dbName, db);
+ List<String> alldbs = client.getDatabases("bigtop_*");
+ Assert.assertNotNull(alldbs);
+ Assert.assertTrue(alldbs.size() > 0);
+ alldbs = client.getAllDatabases();
+ Assert.assertNotNull(alldbs);
+ Assert.assertTrue(alldbs.size() > 0);
+ client.dropDatabase(dbName, true, true);
+ }
+
+ // Not testing types calls, as they aren't used AFAIK
+
+ @Test
+ public void nonPartitionedTable() throws TException {
+ final String tableName = "bigtop_thrift_table_" + rand.nextInt(Integer.MAX_VALUE);
+
+ // I don't test every operation related to tables, but only those that are frequently used.
+ SerDeInfo serde = new SerDeInfo("default_serde",
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
+ FieldSchema fs = new FieldSchema("a", "int", "no comment");
+ StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ Table table = new Table(tableName, "default", "me", 0, 0, 0, sd, null,
+ new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
+ client.createTable(table);
+
+ table = client.getTable("default", tableName);
+ Assert.assertNotNull(table);
+
+ List<Table> tables =
+ client.getTableObjectsByName("default", Collections.singletonList(tableName));
+ Assert.assertNotNull(tables);
+ Assert.assertEquals(1, tables.size());
+
+ List<String> tableNames = client.getTables("default", "bigtop_*");
+ Assert.assertNotNull(tableNames);
+ Assert.assertTrue(tableNames.size() >= 1);
+
+ tableNames = client.getAllTables("default");
+ Assert.assertNotNull(tableNames);
+ Assert.assertTrue(tableNames.size() >= 1);
- private static IMetaStoreClient client = null;
- private static HiveConf conf;
+ List<FieldSchema> cols = client.getFields("default", tableName);
+ Assert.assertNotNull(cols);
+ Assert.assertEquals(1, cols.size());
- private Random rand;
+ cols = client.getSchema("default", tableName);
+ Assert.assertNotNull(cols);
+ Assert.assertEquals(1, cols.size());
- @BeforeClass
- public static void connect() throws MetaException {
- if (JdbcConnector.testActive(JdbcConnector.TEST_THRIFT, "Test Thrift ")) {
- String url = JdbcConnector.getProperty(JdbcConnector.METASTORE_URL, "Thrift metastore URL");
- conf = new HiveConf();
- conf.setVar(HiveConf.ConfVars.METASTOREURIS, url);
- LOG.info("Set to test against metastore at " + url);
- client = new HiveMetaStoreClient(conf);
+ table = new Table(table);
+ table.getParameters().put("a", "b");
+ client.alter_table("default", tableName, table, false);
+
+ table.getParameters().put("c", "d");
+ client.alter_table("default", tableName, table);
+
+ client.dropTable("default", tableName, true, false);
+ }
+
+ @Test
+ public void partitionedTable() throws TException {
+ final String tableName = "bigtop_thrift_partitioned_table_" + rand.nextInt(Integer.MAX_VALUE);
+
+ // I don't test every operation related to tables, but only those that are frequently used.
+ SerDeInfo serde = new SerDeInfo("default_serde",
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
+ FieldSchema fs = new FieldSchema("a", "int", "no comment");
+ StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ FieldSchema pk = new FieldSchema("pk", "string", "");
+ Table table = new Table(tableName, "default", "me", 0, 0, 0, sd, Collections.singletonList(pk),
+ new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
+ client.createTable(table);
+
+ sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ Partition partition = new Partition(Collections.singletonList("x"), "default", tableName, 0,
+ 0, sd, new HashMap<String, String>());
+ client.add_partition(partition);
+
+ List<Partition> partitions = new ArrayList<>(2);
+ sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ partitions.add(new Partition(Collections.singletonList("y"), "default", tableName, 0,
+ 0, sd, new HashMap<String, String>()));
+ sd = new StorageDescriptor(Collections.singletonList(fs), null,
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE),
+ conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), false, 0, serde, null, null,
+ new HashMap<String, String>());
+ partitions.add(new Partition(Collections.singletonList("z"), "default", tableName, 0,
+ 0, sd, new HashMap<String, String>()));
+ client.add_partitions(partitions);
+
+ List<Partition> parts = client.listPartitions("default", tableName, (short) -1);
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(3, parts.size());
+
+ parts = client.listPartitions("default", tableName, Collections.singletonList("x"),
+ (short) -1);
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(1, parts.size());
+
+ parts = client.listPartitionsWithAuthInfo("default", tableName, (short) -1, "me",
+ Collections.<String>emptyList());
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(3, parts.size());
+
+ List<String> partNames = client.listPartitionNames("default", tableName, (short) -1);
+ Assert.assertNotNull(partNames);
+ Assert.assertEquals(3, partNames.size());
+
+ parts = client.listPartitionsByFilter("default", tableName, "pk = \"x\"", (short) -1);
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(1, parts.size());
+
+ parts = client.getPartitionsByNames("default", tableName, Collections.singletonList("pk=x"));
+ Assert.assertNotNull(parts);
+ Assert.assertEquals(1, parts.size());
+
+ partition = client.getPartition("default", tableName, Collections.singletonList("x"));
+ Assert.assertNotNull(partition);
+
+ partition = client.getPartition("default", tableName, "pk=x");
+ Assert.assertNotNull(partition);
+
+ partition = client.getPartitionWithAuthInfo("default", tableName, Collections.singletonList("x"),
+ "me", Collections.<String>emptyList());
+ Assert.assertNotNull(partition);
+
+ partition = new Partition(partition);
+ partition.getParameters().put("a", "b");
+ client.alter_partition("default", tableName, partition);
+
+ for (Partition p : parts) p.getParameters().put("c", "d");
+ client.alter_partitions("default", tableName, parts);
+
+ // Not testing get_partitions_by_expr because I don't want to hard-code some byte sequence
+ // from the parser. The odds that anyone other than the Hive parser would call this method seem
+ // low, since you'd have to exactly match the serialization of the Hive parser.
+
+ // Not testing partition marking events, not used by anyone but Hive replication AFAIK
+
+ client.dropPartition("default", tableName, "pk=x", true);
+ client.dropPartition("default", tableName, Collections.singletonList("y"), true);
}
- }
-
- @Before
- public void checkIfActive() {
- Assume.assumeTrue(JdbcConnector.testActive(JdbcConnector.TEST_THRIFT, "Test Thrift "));
- rand = new Random();
- }
-
- @Test
- public void db() throws TException {
- final String dbName = "bigtop_thrift_db_" + rand.nextInt(Integer.MAX_VALUE);
-
- Database db = new Database(dbName, "a db", null, new HashMap<String, String>());
- client.createDatabase(db);
- db = client.getDatabase(dbName);
- Assert.assertNotNull(db);
- db = new Database(db);
- db.getParameters().put("a", "b");
- client.alterDatabase(dbName, db);
- List<String> alldbs = client.getDatabases("bigtop_*");
- Assert.assertNotNull(alldbs);
- Assert.assertTrue(alldbs.size() > 0);
- alldbs = client.getAllDatabases();
- Assert.assertNotNull(alldbs);
- Assert.assertTrue(alldbs.size() > 0);
- client.dropDatabase(dbName, true, true);
- }
-
- // Not testing types calls, as they aren't used AFAIK
-
- @Test
- public void nonPartitionedTable() throws TException {
- final String tableName = "bigtop_thrift_table_" + rand.nextInt(Integer.MAX_VALUE);
-
- // I don't test every operation related to tables, but only those that are frequently used.
- SerDeInfo serde = new SerDeInfo("default_serde",
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
- FieldSchema fs = new FieldSchema("a", "int", "no comment");
- StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(fs), null,
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
- new HashMap<String, String>());
- Table table = new Table(tableName, "default", "me", 0, 0, 0, sd, null,
- new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
- client.createTable(table);
-
- table = client.getTable("default", tableName);
- Assert.assertNotNull(table);
-
- List<Table> tables =
- client.getTableObjectsByName("default", Collections.singletonList(tableName));
- Assert.assertNotNull(tables);
- Assert.assertEquals(1, tables.size());
-
- List<String> tableNames = client.getTables("default", "bigtop_*");
- Assert.assertNotNull(tableNames);
- Assert.assertTrue(tableNames.size() >= 1);
-
- tableNames = client.getAllTables("default");
- Assert.assertNotNull(tableNames);
- Assert.assertTrue(tableNames.size() >= 1);
-
- List<FieldSchema> cols = client.getFields("default", tableName);
- Assert.assertNotNull(cols);
- Assert.assertEquals(1, cols.size());
-
- cols = client.getSchema("default", tableName);
- Assert.assertNotNull(cols);
- Assert.assertEquals(1, cols.size());
-
- table = new Table(table);
- table.getParameters().put("a", "b");
- client.alter_table("default", tableName, table, false);
-
- table.getParameters().put("c", "d");
- client.alter_table("default", tableName, table);
-
- client.dropTable("default", tableName, true, false);
- }
-
- @Test
- public void partitionedTable() throws TException {
- final String tableName = "bigtop_thrift_partitioned_table_" + rand.nextInt(Integer.MAX_VALUE);
-
- // I don't test every operation related to tables, but only those that are frequently used.
- SerDeInfo serde = new SerDeInfo("default_serde",
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), new HashMap<String, String>());
- FieldSchema fs = new FieldSchema("a", "int", "no comment");
- StorageDescriptor sd = new StorageDescriptor(Collections.singletonList(fs), null,
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT),
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT), false, 0, serde, null, null,
- new HashMap<String, String>());
- FieldSchema pk = new FieldSchema("pk", "string", "");
- Table table = new Table(tableName, "default", "me", 0, 0, 0, sd, Collections.singletonList(pk),
- new HashMap<String, String>(), null, null, TableType.MANAGED_TABLE.toString());
- client.createTable(table);
-
- sd = new StorageDescriptor(Collections.singletonList(fs), null,
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE),
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), false, 0, serde, null, null,
- new HashMap<String, String>());
- Partition partition = new Partition(Collections.singletonList("x"), "default", tableName, 0,
- 0, sd, new HashMap<String, String>());
- client.add_partition(partition);
-
- List<Partition> partitions = new ArrayList<>(2);
- sd = new StorageDescriptor(Collections.singletonList(fs), null,
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE),
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), false, 0, serde, null, null,
- new HashMap<String, String>());
- partitions.add(new Partition(Collections.singletonList("y"), "default", tableName, 0,
- 0, sd, new HashMap<String, String>()));
- sd = new StorageDescriptor(Collections.singletonList(fs), null,
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE),
- conf.getVar(HiveConf.ConfVars.HIVEDEFAULTSERDE), false, 0, serde, null, null,
- new HashMap<String, String>());
- partitions.add(new Partition(Collections.singletonList("z"), "default", tableName, 0,
- 0, sd, new HashMap<String, String>()));
- client.add_partitions(partitions);
-
- List<Partition> parts = client.listPartitions("default", tableName, (short)-1);
- Assert.assertNotNull(parts);
- Assert.assertEquals(3, parts.size());
-
- parts = client.listPartitions("default", tableName, Collections.singletonList("x"),
- (short)-1);
- Assert.assertNotNull(parts);
- Assert.assertEquals(1, parts.size());
-
- parts = client.listPartitionsWithAuthInfo("default", tableName, (short)-1, "me",
- Collections.<String>emptyList());
- Assert.assertNotNull(parts);
- Assert.assertEquals(3, parts.size());
-
- List<String> partNames = client.listPartitionNames("default", tableName, (short)-1);
- Assert.assertNotNull(partNames);
- Assert.assertEquals(3, partNames.size());
-
- parts = client.listPartitionsByFilter("default", tableName, "pk = \"x\"", (short)-1);
- Assert.assertNotNull(parts);
- Assert.assertEquals(1, parts.size());
-
- parts = client.getPartitionsByNames("default", tableName, Collections.singletonList("pk=x"));
- Assert.assertNotNull(parts);
- Assert.assertEquals(1, parts.size());
-
- partition = client.getPartition("default", tableName, Collections.singletonList("x"));
- Assert.assertNotNull(partition);
-
- partition = client.getPartition("default", tableName, "pk=x");
- Assert.assertNotNull(partition);
-
- partition = client.getPartitionWithAuthInfo("default", tableName, Collections.singletonList("x"),
- "me", Collections.<String>emptyList());
- Assert.assertNotNull(partition);
-
- partition = new Partition(partition);
- partition.getParameters().put("a", "b");
- client.alter_partition("default", tableName, partition);
-
- for (Partition p : parts) p.getParameters().put("c", "d");
- client.alter_partitions("default", tableName, parts);
-
- // Not testing get_partitions_by_expr because I don't want to hard-code some byte sequence
- // from the parser. The odds that anyone other than the Hive parser would call this method seem
- // low, since you'd have to exactly match the serialization of the Hive parser.
-
- // Not testing partition marking events, not used by anyone but Hive replication AFAIK
-
- client.dropPartition("default", tableName, "pk=x", true);
- client.dropPartition("default", tableName, Collections.singletonList("y"), true);
- }
-
- // Not testing index calls, as no one uses indices
-
-
- // Not sure if anyone uses stats calls or not. Other query engines might. Ignoring for now.
-
- // Not sure if anyone else uses functions, though I'm guessing not, as without Hive classes they
- // won't be runnable.
-
- // Not testing authorization calls as AFAIK no one else uses Hive security
-
- // Not testing transaction/locking calls, as those are used only by Hive.
-
- // Not testing notification logging calls, as those are used only by Hive replication.
+
+ // Not testing index calls, as no one uses indices
+
+
+ // Not sure if anyone uses stats calls or not. Other query engines might. Ignoring for now.
+
+ // Not sure if anyone else uses functions, though I'm guessing not, as without Hive classes they
+ // won't be runnable.
+
+ // Not testing authorization calls as AFAIK no one else uses Hive security
+
+ // Not testing transaction/locking calls, as those are used only by Hive.
+
+ // Not testing notification logging calls, as those are used only by Hive replication.
}
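
For reference, the pattern these tests exercise (configure a HiveConf with a Thrift metastore URI, open a HiveMetaStoreClient, then issue metadata calls) can be sketched outside the JUnit harness roughly as follows. This is a minimal sketch, not part of the diff above; the URI thrift://localhost:9083 and the class name MetastoreSmokeExample are placeholders, not anything defined in this repository.

import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.thrift.TException;

public class MetastoreSmokeExample {
  public static void main(String[] args) throws TException {
    HiveConf conf = new HiveConf();
    // Same property TestThrift sets from JdbcConnector.METASTORE_URL; placeholder URI here.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // A cheap round trip to confirm the metastore is reachable.
      List<String> dbs = client.getAllDatabases();
      System.out.println("Databases visible through the metastore: " + dbs);
    } finally {
      client.close();
    }
  }
}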