Commit c2be0646 authored by apoorvnaik, committed by Madhan Neethiraj

ATLAS-2444: fix for IT failures

parent 4152bc6d
@@ -615,7 +615,12 @@ public class HiveMetaStoreBridge {
             ref.set("nameServiceId", nameServiceID);
         } else {
             ref.set("path", pathUri);
+            // Only append clusterName for the HDFS path
+            if (pathUri.startsWith(HdfsNameServiceResolver.HDFS_SCHEME)) {
             ref.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, getHdfsPathQualifiedName(clusterName, pathUri));
+            } else {
+                ref.set(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, pathUri);
+            }
         }
         ref.set(AtlasConstants.CLUSTER_NAME_ATTRIBUTE, clusterName);
         return ref;
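Not part of the patch, but for context: the qualifiedName the bridge builds for an HDFS path appears to be the path URI suffixed with "@<clusterName>", which is what the ITs below check by appending "@" + CLUSTER_NAME. A minimal, hedged sketch of that format, using a hypothetical helper name (not the bridge's actual getHdfsPathQualifiedName implementation):

// Hedged sketch only: HDFS paths get "@<clusterName>" appended; any other URI is used unchanged.
public class HdfsPathQualifiedNameSketch {
    private static final String HDFS_SCHEME = "hdfs://";

    static String qualifiedName(String pathUri, String clusterName) {
        if (pathUri != null && pathUri.startsWith(HDFS_SCHEME)) {
            return pathUri + "@" + clusterName;   // e.g. "hdfs://mycluster/tmp/dir@primary"
        }
        return pathUri;                           // e.g. "file:///tmp/dir" stays as-is
    }

    public static void main(String[] args) {
        System.out.println(qualifiedName("hdfs://mycluster/tmp/dir", "primary"));
        System.out.println(qualifiedName("file:///tmp/dir", "primary"));
    }
}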
@@ -231,14 +231,22 @@ public class HiveITBase {
             Referenceable hdfsPathRef = atlasClient.getEntity(hdfsPathId);
             Assert.assertEquals(hdfsPathRef.get("path"), testPathNormed);
             Assert.assertEquals(hdfsPathRef.get(NAME), Path.getPathWithoutSchemeAndAuthority(path).toString().toLowerCase());
-            Assert.assertEquals(hdfsPathRef.get(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME), testPathNormed);
+            if (testPathNormed != null) {
+                Assert.assertTrue(((String)hdfsPathRef.get(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME)).startsWith(testPathNormed));
+            }
         }
     }
 
     private String assertHDFSPathIsRegistered(String path) throws Exception {
         LOG.debug("Searching for hdfs path {}", path);
+        // ATLAS-2444 HDFS name node federation adds the cluster name to the qualifiedName
+        if (path.startsWith("hdfs://")) {
+            String pathWithCluster = path + "@" + CLUSTER_NAME;
+            return assertEntityIsRegistered(HiveMetaStoreBridge.HDFS_PATH, AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, pathWithCluster, null);
+        } else {
         return assertEntityIsRegistered(HiveMetaStoreBridge.HDFS_PATH, AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME, path, null);
+        }
     }
 
     protected String assertDatabaseIsRegistered(String dbName) throws Exception {
         return assertDatabaseIsRegistered(dbName, null);
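Not part of the patch, but for context: the strict equality asserts are loosened to startsWith because, for HDFS paths, the registered qualifiedName now carries an "@<clusterName>" suffix, so only the path prefix is stable. A hedged TestNG-style illustration with assumed values (not taken from the IT data):

import org.testng.Assert;

// Illustration only; the path and cluster name below are assumptions.
public class StartsWithAssertionSketch {
    public static void main(String[] args) {
        String testPathNormed = "hdfs://localhost:8020/tmp/hive-input"; // assumed normalized test path
        String registeredName = testPathNormed + "@" + "primary";       // qualifiedName with assumed cluster suffix

        // The old strict check would now fail:
        // Assert.assertEquals(registeredName, testPathNormed);
        Assert.assertTrue(registeredName.startsWith(testPathNormed));
    }
}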
@@ -263,11 +263,10 @@ public class HiveHookIT extends HiveITBase {
         Iterator<? extends Entity> iterator = expectedTables.iterator();
         for(int i = 0; i < expectedTables.size(); i++) {
             Entity hiveEntity = iterator.next();
-            if (Entity.Type.TABLE.equals(hiveEntity.getType()) ||
-                Entity.Type.DFS_DIR.equals(hiveEntity.getType())) {
+            if (Entity.Type.TABLE.equals(hiveEntity.getType()) || Entity.Type.DFS_DIR.equals(hiveEntity.getType())) {
                 Referenceable entity = atlasClient.getEntity(tableRef.get(i)._getId());
                 LOG.debug("Validating output {} {} ", i, entity);
-                Assert.assertEquals(entity.get(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME), hiveEntity.getName());
+                assertTrue(((String)entity.get(AtlasClient.REFERENCEABLE_ATTRIBUTE_NAME)).startsWith(hiveEntity.getName()));
             }
         }
     }
@@ -32,8 +32,9 @@ import java.util.Objects;
 public class HdfsNameServiceResolver {
     private static final Logger LOG = LoggerFactory.getLogger(HdfsNameServiceResolver.class);
 
+    public static final String HDFS_SCHEME = "hdfs://";
+
     private static final int DEFAULT_PORT = 8020;
-    private static final String HDFS_SCHEME = "hdfs://";
     private static final String HDFS_NAMESERVICE_PROPERTY_KEY = "dfs.nameservices";
     private static final String HDFS_INTERNAL_NAMESERVICE_PROPERTY_KEY = "dfs.internal.nameservices";
     private static final String HDFS_NAMENODES_HA_NODES_PREFIX = "dfs.ha.namenodes.";
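Not part of the patch, but for context: the keys above are standard HDFS federation/HA settings. A hedged sketch, not the resolver's actual code, of how a nameservice id could be mapped to its NameNode RPC addresses from a Hadoop Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hedged sketch: walks dfs.(internal.)nameservices, then dfs.ha.namenodes.<nsId>,
// then dfs.namenode.rpc-address.<nsId>.<nnId> to print the configured NameNodes.
public class NameServiceLookupSketch {
    public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration(); // picks up hdfs-site.xml from the classpath

        // Prefer dfs.internal.nameservices, fall back to dfs.nameservices.
        String[] nameServices = conf.getTrimmedStrings("dfs.internal.nameservices");
        if (nameServices.length == 0) {
            nameServices = conf.getTrimmedStrings("dfs.nameservices");
        }

        for (String nsId : nameServices) {
            for (String nnId : conf.getTrimmedStrings("dfs.ha.namenodes." + nsId)) {
                String rpcAddress = conf.get("dfs.namenode.rpc-address." + nsId + "." + nnId);
                System.out.println(nsId + " -> " + nnId + " @ " + rpcAddress);
            }
        }
    }
}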
@@ -83,7 +84,7 @@ public class HdfsNameServiceResolver {
         String ret = path;
 
         // Only handle URLs that begin with hdfs://
-        if (path.indexOf(HDFS_SCHEME) == 0) {
+        if (path != null && path.indexOf(HDFS_SCHEME) == 0) {
             URI uri = new Path(path).toUri();
 
             String nsId;
@@ -114,7 +115,7 @@ public class HdfsNameServiceResolver {
         String ret = "";
 
         // Only handle path URLs that begin with hdfs://
-        if (path.indexOf(HDFS_SCHEME) == 0) {
+        if (path != null && path.indexOf(HDFS_SCHEME) == 0) {
             try {
                 URI uri = new Path(path).toUri();
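Not part of the patch, but for context: both guards above follow the same pattern, resolving only non-null paths that start with the hdfs:// scheme. A standalone, hedged sketch of that guard, using a hypothetical helper name:

import java.net.URI;

// Illustration of the null-safe scheme check; authorityOf is a hypothetical name, not resolver API.
public final class HdfsSchemeGuardSketch {
    private static final String HDFS_SCHEME = "hdfs://";

    // Returns the authority (nameservice id or namenode host:port) for hdfs:// paths, null otherwise.
    static String authorityOf(String path) {
        if (path != null && path.startsWith(HDFS_SCHEME)) {
            return URI.create(path).getAuthority(); // e.g. "mycluster" or "namenode:8020"
        }
        return null; // null or non-HDFS paths are left alone
    }

    public static void main(String[] args) {
        System.out.println(authorityOf("hdfs://mycluster/tmp/dir")); // mycluster
        System.out.println(authorityOf("s3a://bucket/key"));         // null
        System.out.println(authorityOf(null));                       // null
    }
}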