Commit 1a390f01 by Suma Shivaprasad

ATLAS-529 support drop database (sumasai)

parent 755e59c0
@@ -20,6 +20,7 @@ package org.apache.atlas.hive.hook;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.atlas.AtlasClient;
import org.apache.atlas.hive.bridge.HiveMetaStoreBridge;
import org.apache.atlas.hive.model.HiveDataModelGenerator;
import org.apache.atlas.hive.model.HiveDataTypes;
@@ -347,6 +348,10 @@ public class HiveHook extends AtlasHook implements ExecuteWithHookContext {
deleteTable(dgiBridge, event);
break;
case DROPDATABASE:
deleteDatabase(dgiBridge, event);
break;
default:
}
@@ -354,8 +359,14 @@ public class HiveHook extends AtlasHook implements ExecuteWithHookContext {
}
private void deleteTable(HiveMetaStoreBridge dgiBridge, HiveEventContext event) {
-        for (Entity output : event.outputs) {
+        for (WriteEntity output : event.outputs) {
if (Type.TABLE.equals(output.getType())) {
deleteTable(dgiBridge, event, output);
}
}
}
private void deleteTable(HiveMetaStoreBridge dgiBridge, HiveEventContext event, WriteEntity output) {
final String tblQualifiedName = HiveMetaStoreBridge.getTableQualifiedName(dgiBridge.getClusterName(), output.getTable().getDbName(), output.getTable().getTableName());
LOG.info("Deleting table {} ", tblQualifiedName);
messages.add(
@@ -364,6 +375,23 @@ public class HiveHook extends AtlasHook implements ExecuteWithHookContext {
HiveDataModelGenerator.NAME,
tblQualifiedName));
}
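// For reference: the delete message above is keyed on the table's qualified name
// returned by HiveMetaStoreBridge.getTableQualifiedName. The sketch below is
// illustrative only; the lower-cased "<db>.<table>@<cluster>" convention is an
// assumption for illustration, not the bridge's verbatim implementation.
private static String tableQualifiedNameSketch(String clusterName, String dbName, String tableName) {
    return String.format("%s.%s@%s", dbName.toLowerCase(), tableName.toLowerCase(), clusterName);
}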
private void deleteDatabase(HiveMetaStoreBridge dgiBridge, HiveEventContext event) {
if (event.outputs.size() > 1) {
LOG.info("Starting deletion of tables and databases with cascade {} " , event.queryStr);
}
for (WriteEntity output : event.outputs) {
if (Type.TABLE.equals(output.getType())) {
deleteTable(dgiBridge, event, output);
} else if (Type.DATABASE.equals(output.getType())) {
final String dbQualifiedName = HiveMetaStoreBridge.getDBQualifiedName(dgiBridge.getClusterName(), output.getDatabase().getName());
messages.add(
new HookNotification.EntityDeleteRequest(event.getUser(),
HiveDataTypes.HIVE_DB.getName(),
AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME,
dbQualifiedName));
}
}
}
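A note on the change above: a DROP DATABASE ... CASCADE is expected to surface one WriteEntity per dropped table plus one for the database itself, which is why deleteDatabase handles both entity types in a single pass. The database delete request is keyed on HiveMetaStoreBridge.getDBQualifiedName and sent against AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME; a minimal sketch of the assumed naming convention (lower-cased "<db>@<cluster>", an assumption rather than the bridge's verbatim code) follows.
// Illustrative sketch only: assumed form of the database qualified name used as
// the referenceable-attribute value in the EntityDeleteRequest above.
static String dbQualifiedNameSketch(String clusterName, String dbName) {
    return String.format("%s@%s", dbName.toLowerCase(), clusterName);
}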
......
@@ -877,7 +877,6 @@ public class HiveHookIT {
assertColumnIsRegistered(HiveMetaStoreBridge.getColumnQualifiedName(HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, DEFAULT_DB, tableName), "name"));
final String query = String.format("drop table %s ", tableName);
runCommand(query);
assertColumnIsNotRegistered(HiveMetaStoreBridge.getColumnQualifiedName(HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, DEFAULT_DB, tableName), "id"));
assertColumnIsNotRegistered(HiveMetaStoreBridge.getColumnQualifiedName(HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, DEFAULT_DB, tableName), "name"));
@@ -885,6 +884,65 @@ public class HiveHookIT {
}
@Test
public void testDropDatabaseWithCascade() throws Exception {
//Test Deletion of database and its corresponding tables
String dbName = "db" + random();
runCommand("create database " + dbName + " WITH DBPROPERTIES ('p1'='v1')");
final int numTables = 10;
String[] tableNames = new String[numTables];
for(int i = 0; i < numTables; i++) {
tableNames[i] = createTable(true, true, false);
}
final String query = String.format("drop database %s cascade", dbName);
runCommand(query);
//Verify columns are not registered for one of the tables
assertColumnIsNotRegistered(HiveMetaStoreBridge.getColumnQualifiedName(HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, dbName, tableNames[0]), "id"));
assertColumnIsNotRegistered(HiveMetaStoreBridge.getColumnQualifiedName(HiveMetaStoreBridge.getTableQualifiedName(CLUSTER_NAME, dbName, tableNames[0]), "name"));
for(int i = 0; i < numTables; i++) {
assertTableIsNotRegistered(dbName, tableNames[i]);
}
assertDBIsNotRegistered(dbName);
}
@Test
public void testDropDatabaseWithoutCascade() throws Exception {
//Test deletion of a database after its tables are dropped individually (no cascade)
String dbName = "db" + random();
runCommand("create database " + dbName + " WITH DBPROPERTIES ('p1'='v1')");
final int numTables = 10;
String[] tableNames = new String[numTables];
for(int i = 0; i < numTables; i++) {
tableNames[i] = createTable(true, true, false);
String query = String.format("drop table %s", tableNames[i]);
runCommand(query);
assertTableIsNotRegistered(dbName, tableNames[i]);
}
final String query = String.format("drop database %s", dbName);
runCommand(query);
assertDBIsNotRegistered(dbName);
}
@Test
public void testDropNonExistingDB() throws Exception {
//Test deletion of a non-existing DB
final String dbName = "nonexistingdb";
assertDBIsNotRegistered(dbName);
final String query = String.format("drop database if exists %s cascade", dbName);
runCommand(query);
//Should have no effect
assertDBIsNotRegistered(dbName);
assertProcessIsNotRegistered(query);
}
@Test
public void testDropNonExistingTable() throws Exception {
//Test deletion of a non-existing table
final String tableName = "nonexistingtable";
@@ -1095,6 +1153,14 @@ public class HiveHookIT {
assertEntityIsNotRegistered(QUERY_TYPE.DSL, query);
}
private void assertDBIsNotRegistered(String dbName) throws Exception {
LOG.debug("Searching for database {}.{}", dbName);
String query = String.format(
"%s as d where name = '%s' and clusterName = '%s'" + " select d",
HiveDataTypes.HIVE_DB.getName(), dbName.toLowerCase(), CLUSTER_NAME);
assertEntityIsNotRegistered(QUERY_TYPE.DSL, query);
}
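// For a concrete sense of the DSL query assertDBIsNotRegistered builds, the snippet
// below expands the same format string with hypothetical values ("salesdb", "primary");
// HiveDataTypes.HIVE_DB is assumed to resolve to the "hive_db" type name. Illustrative only.
private static String dbSearchDslSketch() {
    // e.g. "hive_db as d where name = 'salesdb' and clusterName = 'primary' select d"
    return String.format("%s as d where name = '%s' and clusterName = '%s'" + " select d",
            "hive_db", "salesdb", "primary");
}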
private String assertTableIsRegistered(String dbName, String tableName) throws Exception {
LOG.debug("Searching for table {}.{}", dbName, tableName);
String query = String.format(
......
@@ -13,6 +13,7 @@ ATLAS-409 Atlas will not import avro tables with schema read from a file (dosset
ATLAS-379 Create sqoop and falcon metadata addons (venkatnrangan,bvellanki,sowmyaramesh via shwethags)
ALL CHANGES:
ATLAS-529 support drop database (sumasai)
ATLAS-528 Support drop table,view (sumasai)
ATLAS-603 Document High Availability of Atlas (yhemanth via sumasai)
ATLAS-498 Support Embedded HBase (tbeerbower via sumasai)
......