Commit 34cdef44 by Dan Markwat

Merge remote-tracking branch 'origin/guice-enabled'

Conflicts:
    repository/pom.xml
    repository/src/main/java/org/apache/hadoop/metadata/services/GraphBackedMetadataRepository.java
    repository/src/main/java/org/apache/hadoop/metadata/services/MetadataRepository.java
    repository/src/main/java/org/apache/hadoop/metadata/services/TitanGraphService.java
    repository/src/test/java/org/apache/hadoop/metadata/services/GraphBackedMetadataRepositoryTest.java
    webapp/src/main/java/org/apache/hadoop/metadata/web/resources/EntityResource.java
    webapp/src/main/resources/application.properties
    webapp/src/test/java/org/apache/hadoop/metadata/GraphRepositoryServiceIT.java
parents faf0b48b 96350bda
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-governance</artifactId>
<version>0.1-incubating-SNAPSHOT</version>
</parent>
<artifactId>metadata-bridge</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-metastore</artifactId>
<version>0.14.0</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.2.2</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.10</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-typesystem</artifactId>
<version>0.1-incubating-SNAPSHOT</version>
</dependency>
</dependencies>
</project>
\ No newline at end of file
package org.apache.hadoop.metadata.bridge;
import org.apache.hadoop.metadata.types.AttributeDefinition;
import org.apache.hadoop.metadata.types.ClassType;
import org.apache.hadoop.metadata.types.HierarchicalTypeDefinition;
import org.apache.hadoop.metadata.types.TypeSystem;
import com.google.common.collect.ImmutableList;
public interface Bridge {
boolean defineBridgeTypes(TypeSystem ts);
}
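// Hypothetical example implementation (a sketch, not part of this commit), showing the
// expected call pattern for the Bridge contract above; a real bridge builds type
// definitions and registers them through the TypeSystem, as HiveStructureBridge does below.
package org.apache.hadoop.metadata.bridge;
import org.apache.hadoop.metadata.types.TypeSystem;
class NoOpBridge implements Bridge {
    @Override
    public boolean defineBridgeTypes(TypeSystem ts) {
        // Nothing to register; report success so callers can proceed.
        return true;
    }
}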
package org.apache.hadoop.metadata.bridge;
import org.apache.hadoop.metadata.types.AttributeDefinition;
import org.apache.hadoop.metadata.types.ClassType;
import org.apache.hadoop.metadata.types.HierarchicalTypeDefinition;
import org.apache.hadoop.metadata.types.TypeSystem;
import com.google.common.collect.ImmutableList;
public class BridgeAssistant {
protected HierarchicalTypeDefinition<ClassType> createClassTypeDef(String name, ImmutableList<String> superTypes, AttributeDefinition... attrDefs) {
return new HierarchicalTypeDefinition(ClassType.class, name, superTypes, attrDefs);
}
}
package org.apache.hadoop.metadata.bridge;
import org.apache.hadoop.hive.metastore.api.MetaException;
public class BridgeException extends MetaException {
/**
*
*/
private static final long serialVersionUID = -384401342591560473L;
}
package org.apache.hadoop.metadata.bridge;
import org.apache.hadoop.metadata.types.TypeSystem;
public class BridgeManager {
TypeSystem ts;
BridgeManager(TypeSystem ts){
this.ts = ts;
}
}
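// Hypothetical wiring sketch (not part of this commit): since the constructor above is
// package-private, a caller in the same package would hand the TypeSystem to the manager
// and let a bridge register its types against it.
package org.apache.hadoop.metadata.bridge;
import org.apache.hadoop.metadata.bridge.hivestructure.HiveStructureBridge;
import org.apache.hadoop.metadata.types.TypeSystem;
class BridgeManagerUsageExample {
    static BridgeManager wire(TypeSystem ts) {
        BridgeManager manager = new BridgeManager(ts);
        new HiveStructureBridge().defineBridgeTypes(manager.ts); // registers the Hive structure types
        return manager;
    }
}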
package org.apache.hadoop.metadata.bridge.hivelineage;
import org.apache.hadoop.metadata.bridge.Bridge;
import org.apache.hadoop.metadata.types.TypeSystem;
public class HiveLineageBridge implements Bridge {
@Override
public boolean defineBridgeTypes(TypeSystem ts) {
// TODO Auto-generated method stub
return false;
}
}
package org.apache.hadoop.metadata.bridge.hivestructure;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.hadoop.hive.metastore.api.UnknownTableException;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.MetadataService;
import org.apache.hadoop.metadata.Referenceable;
import org.apache.hadoop.metadata.storage.RepositoryException;
import org.apache.hadoop.metadata.types.ClassType;
import org.apache.thrift.TException;
/*
* Initial pass at one time importer TODO - needs re-write
*/
public class HiveMetaImporter {
private static HiveMetaStoreClient msc;
private static MetadataService ms;
public HiveMetaImporter(MetadataService ms){
try {
this.ms = ms;
msc = new HiveMetaStoreClient(new HiveConf());
} catch (MetaException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
public static boolean fullImport(){
try{
databasesImport();
for (String dbName : msc.getAllDatabases()){
tablesImport(dbName);
for(String tbName : msc.getAllTables(dbName)){
fieldsImport(dbName,tbName);
}
}
// report success only after every database has been processed
return true;
}catch(MetaException me){
me.printStackTrace();
}catch(RepositoryException re){
re.printStackTrace();
}
return false;
}
public static boolean databasesImport() throws MetaException, RepositoryException{
ClassType classType = null;
try {
classType = ms.getTypeSystem().getDataType(ClassType.class, HiveStructureBridge.DB_CLASS_TYPE);
} catch (MetadataException e1) {
e1.printStackTrace();
}
for(String dbName : msc.getAllDatabases()){
databaseImport(dbName);
}
return true;
}
public static boolean databaseImport(String dbName) throws MetaException, RepositoryException{
try {
Database db = msc.getDatabase(dbName);
Referenceable dbRef = new Referenceable(HiveStructureBridge.DB_CLASS_TYPE);
dbRef.set("DESC", db.getDescription());
dbRef.set("DB_LOCATION_URI", db.getLocationUri());
dbRef.set("NAME", db.getName());
if(db.isSetOwnerType()){dbRef.set("OWNER_TYPE", db.getOwnerType());}
if(db.isSetOwnerName()){dbRef.set("OWNER_NAME", db.getOwnerName());}
ms.getRepository().create(dbRef);
} catch (NoSuchObjectException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (TException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return true;
}
public static boolean tablesImport(String dbName) throws MetaException, RepositoryException{
ClassType classType = null;
try {
classType = ms.getTypeSystem().getDataType(ClassType.class, HiveStructureBridge.TB_CLASS_TYPE);
} catch (MetadataException e1) {
e1.printStackTrace();
}
for(String tbName : msc.getAllTables(dbName)){
tableImport(dbName, tbName);
}
return true;
}
public static boolean tableImport(String dbName, String tbName) throws MetaException, RepositoryException{
try {
Table tb = msc.getTable(dbName, tbName);
Referenceable tbRef = new Referenceable(HiveStructureBridge.TB_CLASS_TYPE);
tbRef.set("CREATE_TIME", tb.getCreateTime());
tbRef.set("LAST_ACCESS_TIME", tb.getLastAccessTime());
tbRef.set("OWNER", tb.getOwner());
tbRef.set("TBL_NAME", tb.getTableName());
tbRef.set("TBL_TYPE", tb.getTableType());
if(tb.isSetViewExpandedText()){tbRef.set("VIEW_EXPANDED_TEXT", tb.getViewExpandedText());}
if(tb.isSetViewOriginalText()){tbRef.set("VIEW_ORIGINAL_TEXT", tb.getViewOriginalText());}
ms.getRepository().create(tbRef);
} catch (NoSuchObjectException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (TException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return true;
}
public static boolean fieldsImport (String dbName, String tbName) throws MetaException, RepositoryException{
ClassType classType = null;
try {
classType = ms.getTypeSystem().getDataType(ClassType.class, HiveStructureBridge.FD_CLASS_TYPE);
} catch (MetadataException e1) {
e1.printStackTrace();
}
try {
for(FieldSchema fs : msc.getFields(dbName, tbName)){
Referenceable fdRef = new Referenceable(HiveStructureBridge.FD_CLASS_TYPE);
if(fs.isSetComment()){fdRef.set("COMMENT", fs.getComment());}
fdRef.set("COLUMN_NAME", fs.getName());
fdRef.set("TYPE_NAME", fs.getType());
ms.getRepository().create(fdRef);
}
} catch (UnknownTableException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (UnknownDBException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (TException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return true;
}
public static boolean fieldImport(String dbName, String tbName, String fdName) throws MetaException{
try {
for(FieldSchema fs : msc.getFields(dbName, tbName)){
if (fs.getName().equals(fdName)){
Referenceable fdRef = new Referenceable(HiveStructureBridge.FD_CLASS_TYPE);
if(fs.isSetComment()){fdRef.set("COMMENT", fs.getComment());}
fdRef.set("COLUMN_NAME", fs.getName());
fdRef.set("TYPE_NAME", fs.getType());
//SaveObject to MS Backend
return true;
}
}
} catch (UnknownTableException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (UnknownDBException e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (TException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return true;
}
}
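// Hypothetical one-shot import (a sketch, not part of this commit); it assumes an
// already-initialized MetadataService and a Hive metastore reachable through the
// HiveConf picked up from the classpath.
package org.apache.hadoop.metadata.bridge.hivestructure;
import org.apache.hadoop.metadata.MetadataService;
class HiveMetaImporterUsageExample {
    static boolean runOnce(MetadataService ms) {
        new HiveMetaImporter(ms);             // binds the importer to the service and the metastore client
        return HiveMetaImporter.fullImport(); // walks every database, table and field
    }
}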
package org.apache.hadoop.metadata.bridge.hivestructure;
import java.util.ArrayList;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.bridge.Bridge;
import org.apache.hadoop.metadata.bridge.BridgeAssistant;
import org.apache.hadoop.metadata.types.AttributeDefinition;
import org.apache.hadoop.metadata.types.ClassType;
import org.apache.hadoop.metadata.types.HierarchicalTypeDefinition;
import org.apache.hadoop.metadata.types.Multiplicity;
import org.apache.hadoop.metadata.types.TypeSystem;
public class HiveStructureBridge extends BridgeAssistant implements Bridge{
static final String DB_CLASS_TYPE = "HiveDatabase";
static final String TB_CLASS_TYPE = "HiveTable";
static final String FD_CLASS_TYPE = "HiveField";
@Override
public boolean defineBridgeTypes(TypeSystem ts) {
ArrayList<HierarchicalTypeDefinition<?>> al = new ArrayList<HierarchicalTypeDefinition<?>>();
try{
HierarchicalTypeDefinition<ClassType> databaseClassTypeDef = new HierarchicalTypeDefinition<ClassType>("ClassType",DB_CLASS_TYPE, null,
new AttributeDefinition[]{
new AttributeDefinition("DESC", "STRING_TYPE", Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("DB_LOCATION_URI", "STRING_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("NAME", "STRING_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("OWNER_TYPE", "STRING_TYPE", Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("OWNER_NAME", "STRING_TYPE", Multiplicity.OPTIONAL, false, null)
}
);
HierarchicalTypeDefinition<ClassType> tableClassTypeDef = new HierarchicalTypeDefinition<ClassType>("ClassType",TB_CLASS_TYPE, null,
new AttributeDefinition[]{
new AttributeDefinition("CREATE_TIME", "LONG_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("LAST_ACCESS_TIME", "LONG_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("OWNER", "STRING_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("TBL_NAME", "STRING_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("TBL_TYPE", "STRING_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("VIEW_EXPANDED_TEXT", "STRING_TYPE", Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("VIEW_ORIGINAL_TEXT", "STRING_TYPE", Multiplicity.OPTIONAL, false, null)
}
);
HierarchicalTypeDefinition<ClassType> columnClassTypeDef = new HierarchicalTypeDefinition<ClassType>("ClassType",FD_CLASS_TYPE, null,
new AttributeDefinition[]{
new AttributeDefinition("COMMENT", "STRING_TYPE", Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("COLUMN_NAME", "STRING_TYPE", Multiplicity.REQUIRED, false, null),
new AttributeDefinition("TYPE_NAME", "STRING_TYPE", Multiplicity.REQUIRED, false, null)
}
);
al.add(databaseClassTypeDef);
al.add(tableClassTypeDef);
al.add(columnClassTypeDef);
}catch(ClassNotFoundException e){
e.printStackTrace();
}
for (HierarchicalTypeDefinition htd : al){
try {
ts.defineClassType(htd);
} catch (MetadataException e) {
System.out.println(htd.hierarchicalMetaTypeName + " could not be added to the type system");
e.printStackTrace();
}
}
return false;
}
}
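// Hypothetical wiring sketch (not part of this commit): the class type definitions above
// are registered once, before the importer runs; the TypeSystem is assumed to come from
// the metadata service.
package org.apache.hadoop.metadata.bridge.hivestructure;
import org.apache.hadoop.metadata.types.TypeSystem;
class HiveStructureBridgeUsageExample {
    static void registerTypes(TypeSystem ts) {
        HiveStructureBridge bridge = new HiveStructureBridge();
        bridge.defineBridgeTypes(ts); // defines the HiveDatabase, HiveTable and HiveField class types
    }
}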
@@ -27,13 +27,6 @@ import java.io.IOException;
public interface Service extends Closeable {
/**
* Name of the service.
*
* @return name of the service
*/
String getName();
/**
* Starts the service. This method blocks until the service has completely started.
*
* @throws Exception
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.service;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Initializer used at startup to bring up all of the Metadata services.
*/
public class ServiceInitializer {
private static final Logger LOG = LoggerFactory
.getLogger(ServiceInitializer.class);
private final Services services = Services.get();
// default property file name/path
private static final String DEFAULT_CONFIG_PATH = "application.properties";
// system property referenced by this class to extract the user-overridden
// properties file
public static final String PROPERTIES_SYS_PROP = "metadata.properties";
// Path to the properties file (must be on the classpath for
// PropertiesConfiguration to work)
private final String propertyPath;
/**
* Default constructor. Use the metadata.properties System property to
* determine the property file name.
*/
public ServiceInitializer() {
propertyPath = System.getProperty(PROPERTIES_SYS_PROP,
DEFAULT_CONFIG_PATH);
}
/**
* Create a ServiceInitializer, specifying the properties file filename
* explicitly
*
* @param propPath
* the filename of the properties file with the service
* initializer information
*/
public ServiceInitializer(String propPath) {
propertyPath = propPath;
}
/**
* Get the configuration properties for the ServiceInitializer
*
* @return the PropertiesConfiguration loaded from the configured property path
* @throws ConfigurationException
*/
public PropertiesConfiguration getConfiguration()
throws ConfigurationException {
return new PropertiesConfiguration(propertyPath);
}
/**
* Initialize the services specified by the application.services property
*
* @throws MetadataException
*/
public void initialize() throws MetadataException {
/*
* TODO - determine whether this service model is the right model;
* Inter-service dependencies can wreak havoc using the current model
*/
String[] serviceClassNames;
LOG.info("Loading services using properties file: {}", propertyPath);
try {
PropertiesConfiguration configuration = getConfiguration();
serviceClassNames = configuration
.getStringArray("application.services");
} catch (ConfigurationException e) {
throw new RuntimeException("unable to get server properties", e);
}
for (String serviceClassName : serviceClassNames) {
serviceClassName = serviceClassName.trim();
if (serviceClassName.isEmpty()) {
continue;
}
Service service = ReflectionUtils
.getInstanceByClassName(serviceClassName);
services.register(service);
LOG.info("Initializing service: {}", serviceClassName);
try {
service.start();
} catch (Throwable t) {
LOG.error("Failed to initialize service {}", serviceClassName,
t);
throw new MetadataException(t);
}
LOG.info("Service initialized: {}", serviceClassName);
}
}
public void destroy() throws MetadataException {
for (Service service : services) {
LOG.info("Destroying service: {}", service.getClass().getName());
try {
service.stop();
} catch (Throwable t) {
LOG.error("Failed to destroy service {}", service.getClass()
.getName(), t);
throw new MetadataException(t);
}
LOG.info("Service destroyed: {}", service.getClass().getName());
}
}
}
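// Hypothetical startup/shutdown sequence (a sketch, not part of this commit); the property
// file name is an assumption, must be on the classpath, and must list the services to
// start under the application.services key.
package org.apache.hadoop.metadata.service;
class ServiceInitializerUsageExample {
    public static void main(String[] args) throws Exception {
        System.setProperty(ServiceInitializer.PROPERTIES_SYS_PROP, "application.properties");
        ServiceInitializer initializer = new ServiceInitializer();
        initializer.initialize(); // instantiates, registers and starts each configured Service
        try {
            // ... application work would happen here ...
        } finally {
            initializer.destroy(); // stops every registered service
        }
    }
}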
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.service;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.util.ReflectionUtils;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.NoSuchElementException;
/**
* Repository of services initialized at startup.
*/
public final class Services implements Iterable<Service> {
private static final Services INSTANCE = new Services();
private Services() {
}
public static Services get() {
return INSTANCE;
}
private final Map<String, Service> services =
new LinkedHashMap<String, Service>();
public synchronized void register(Service service) throws MetadataException {
if (services.containsKey(service.getName())) {
throw new MetadataException("Service " + service.getName() + " already registered");
} else {
services.put(service.getName(), service);
}
}
@SuppressWarnings("unchecked")
public <T extends Service> T getService(String serviceName) {
if (services.containsKey(serviceName)) {
return (T) services.get(serviceName);
} else {
throw new NoSuchElementException(
"Service " + serviceName + " not registered with registry");
}
}
public boolean isRegistered(String serviceName) {
return services.containsKey(serviceName);
}
@Override
public Iterator<Service> iterator() {
return services.values().iterator();
}
public Service init(String serviceName) throws MetadataException {
if (isRegistered(serviceName)) {
throw new MetadataException("Service is already initialized " + serviceName);
}
String serviceClassName;
try {
PropertiesConfiguration configuration =
new PropertiesConfiguration("application.properties");
serviceClassName = configuration.getString(serviceName + ".impl");
} catch (ConfigurationException e) {
throw new MetadataException("unable to get server properties");
}
Service service = ReflectionUtils.getInstanceByClassName(serviceClassName);
register(service);
return service;
}
public void reset() {
services.clear();
}
}
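// Hypothetical registry sketch (not part of this commit), using the TestService defined
// further below: services are keyed by Service.getName() and looked up by that name.
package org.apache.hadoop.metadata.service;
import org.apache.hadoop.metadata.MetadataException;
class ServicesUsageExample {
    static TestService registerAndLookUp() throws MetadataException {
        Services services = Services.get();
        if (!services.isRegistered(TestService.NAME)) {
            services.register(new TestService()); // throws if a service with this name already exists
        }
        return services.getService(TestService.NAME); // unchecked cast to the requested type
    }
}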
package org.apache.hadoop.metadata.service;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Unit test for the Service Initializer.
*
* Test functionality to allow loading of different property files.
*/
public class ServiceInitializerTest {
private final String propertiesFileName = "test.application.properties";
private ServiceInitializer sinit;
@BeforeClass
public void setUp() throws Exception {
// setup for the test properties file
System.setProperty(ServiceInitializer.PROPERTIES_SYS_PROP,
propertiesFileName);
sinit = new ServiceInitializer();
}
@AfterClass
public void tearDown() throws Exception {
// test destruction of the Services - not throwing is treated as success
sinit.destroy();
}
@Test
public void testPropsAreSet() throws Exception {
Assert.assertEquals(
sinit.getConfiguration().getString(
"application.services"),
TestService.NAME);
}
@Test
public void testInitialize() throws Exception {
// test the initialization of the initializer;
// not throwing is treated as success
sinit.initialize();
}
}
package org.apache.hadoop.metadata.service;
import java.io.IOException;
public class TestService implements Service {
public static final String NAME = TestService.class.getName();
@Override
public String getName() {
return NAME;
}
@Override
public void start() throws Exception {
}
@Override
public void stop() {
}
@Override
public void close() throws IOException {
}
}
application.services=org.apache.hadoop.metadata.service.TestService
\ No newline at end of file
package com.aetna.hadoop.dgc.hive;
import java.io.Serializable;
import java.util.List;
import java.util.ArrayList;
public class HiveLineageBean implements Serializable {
/**
*
*/
private static final long serialVersionUID = 1L;
public String queryId;
public String hiveId;
public String user;
public String queryStartTime;
public String queryEndTime;
public String query;
public String tableName;
public String tableLocation;
public boolean success;
public boolean failed;
public String executionEngine;
ArrayList<SourceTables> sourceTables;
ArrayList<QueryColumns> queryColumns;
ArrayList<WhereClause> whereClause;
ArrayList<CreateColumns> createColumns;
ArrayList<GroupBy> groupBy;
ArrayList<GroupBy> orderBy;
public String getQueryId() {
return this.queryId ;
}
public void setQueryId(String queryId) {
this.queryId = queryId;
}
public String getExecutionEngine() {
return this.executionEngine ;
}
public void setExecutionEngine(String executionEngine) {
this.executionEngine = executionEngine;
}
public String getHiveId() {
return this.hiveId ;
}
public void setHiveId(String hiveId) {
this.hiveId = hiveId;
}
public boolean getSuccess() {
return this.success ;
}
public void setSuccess(boolean success) {
this.success = success;
}
public boolean getFailed() {
return this.failed ;
}
public void setFailed(boolean failed) {
this.failed = failed;
}
public String getTableName() {
return this.tableName ;
}
public void setTableName(String tableName) {
this.tableName = tableName;
}
public String getTableLocation() {
return this.tableLocation ;
}
public void setTableLocation(String tableLocation) {
this.tableLocation = tableLocation;
}
public String getUser() {
return this.user ;
}
public void setUser(String user) {
this.user = user;
}
public String getQueryStartTime() {
return this.queryStartTime ;
}
public void setQueryStartTime(String queryStartTime) {
this.queryStartTime = queryStartTime;
}
public String getQueryEndTime() {
return this.queryEndTime ;
}
public void setQueryEndTime(String queryEndTime) {
this.queryEndTime = queryEndTime;
}
public String getQuery() {
return this.query ;
}
public void setQuery(String query) {
this.query = query;
}
public ArrayList<SourceTables> getSourceTables() {
return this.sourceTables ;
}
public void setSourceTables(ArrayList<SourceTables> sourceTables) {
this.sourceTables = sourceTables;
}
public ArrayList<QueryColumns> getQueryColumns() {
return this.queryColumns ;
}
public void setQueryColumns(ArrayList<QueryColumns> queryColumns) {
this.queryColumns = queryColumns;
}
public ArrayList<WhereClause> getWhereClause() {
return this.whereClause ;
}
public void setWhereClause(ArrayList<WhereClause> whereClause) {
this.whereClause = whereClause;
}
public ArrayList<GroupBy> getGroupBy() {
return this.groupBy ;
}
public void setGroupBy(ArrayList<GroupBy> groupBy) {
this.groupBy = groupBy;
}
public class SourceTables {
public String tableName;
public String tableAlias;
public String databaseName;
public String getTableName() {
return this.tableName ;
}
public void setTableName(String tableName) {
this.tableName = tableName;
}
public String getTableAlias() {
return this.tableAlias ;
}
public void setTableAlias(String tableAlias) {
this.tableAlias = tableAlias;
}
public String getDatabaseName() {
return this.databaseName ;
}
public void setDatabaseName(String databaseName) {
this.databaseName = databaseName;
}
}
public class QueryColumns {
public String tbAliasOrName;
public String columnName;
public String columnAlias;
public String columnFunction;
public String getTbAliasOrName() {
return this.tbAliasOrName ;
}
public void setTbAliasOrName(String tbAliasOrName) {
this.tbAliasOrName = tbAliasOrName;
}
public String getColumnName() {
return this.columnName ;
}
public void setColumnName(String columnName) {
this.columnName = columnName;
}
public String getColumnAlias() {
return this.columnAlias ;
}
public void setColumnAlias(String columnAlias) {
this.columnAlias = columnAlias;
}
public String getColumnFunction() {
return this.columnFunction ;
}
public void setColumnFunction(String columnFunction) {
this.columnFunction = columnFunction;
}
}
public class GroupBy {
public String tbAliasOrName;
public String columnName;
public String getTbAliasOrName() {
return this.tbAliasOrName ;
}
public void setTbAliasOrName(String tbAliasOrName) {
this.tbAliasOrName = tbAliasOrName;
}
public String getColumnName() {
return this.columnName ;
}
public void setColumnName(String columnName) {
this.columnName = columnName;
}
}
public class WhereClause {
public String tbAliasOrName;
public String columnCondition;
public String columnName;
public String columnOperator;
public String columnValue;
public String getColumnCondition() {
return this.columnCondition ;
}
public void setColumnCondition(String columnCondition) {
this.columnCondition = columnCondition;
}
public String getTbAliasOrName() {
return this.tbAliasOrName ;
}
public void setTbAliasOrName(String tbAliasOrName) {
this.tbAliasOrName = tbAliasOrName;
}
public String getColumnName() {
return this.columnName ;
}
public void setColumnName(String columnName) {
this.columnName = columnName;
}
public String getColumnOperator() {
return this.columnOperator ;
}
public void setColumnOperator(String columnOperator) {
this.columnOperator = columnOperator;
}
public String getColumnValue() {
return this.columnValue ;
}
public void setColumnValue(String columnValue) {
this.columnValue = columnValue;
}
}
public ArrayList<CreateColumns> getCreateColumns() {
return this.createColumns ;
}
public void setCreateColumns(ArrayList<CreateColumns> createColumns) {
this.createColumns = createColumns;
}
public class CreateColumns {
public String columnName;
public String columnType;
public String getColumnName() {
return this.columnName ;
}
public void setColumnName(String columnName) {
this.columnName = columnName;
}
public String getColumnType() {
return this.columnType ;
}
public void setColumnType(String columnType) {
this.columnType = columnType;
}
}
}
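// Hypothetical round trip (a sketch, not part of this commit): populate the bean by hand
// and serialize it with Gson, mirroring the JSON feed that HiveLineageInfo produces below;
// all values are illustrative.
package com.aetna.hadoop.dgc.hive;
import java.util.ArrayList;
import com.google.gson.Gson;
class HiveLineageBeanUsageExample {
    static String toJson() {
        HiveLineageBean bean = new HiveLineageBean();
        bean.setUser("etl_user");
        bean.setQuery("SELECT a.col1 FROM db.tbl a");
        HiveLineageBean.SourceTables src = bean.new SourceTables();
        src.setDatabaseName("db");
        src.setTableName("tbl");
        src.setTableAlias("a");
        ArrayList<HiveLineageBean.SourceTables> sources = new ArrayList<HiveLineageBean.SourceTables>();
        sources.add(src);
        bean.setSourceTables(sources);
        return new Gson().toJson(bean);
    }
}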
package com.aetna.hadoop.dgc.hive;
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Stack;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher;
import org.apache.hadoop.hive.ql.lib.Dispatcher;
import org.apache.hadoop.hive.ql.lib.GraphWalker;
import org.apache.hadoop.hive.ql.lib.Node;
import org.apache.hadoop.hive.ql.lib.NodeProcessor;
import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
import org.apache.hadoop.hive.ql.lib.Rule;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.HiveParser;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.log4j.ConsoleAppender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import com.aetna.hadoop.dgc.hive.HiveLineageBean.CreateColumns;
import com.aetna.hadoop.dgc.hive.HiveLineageBean.GroupBy;
import com.aetna.hadoop.dgc.hive.HiveLineageBean.QueryColumns;
import com.aetna.hadoop.dgc.hive.HiveLineageBean.SourceTables;
import com.aetna.hadoop.dgc.hive.HiveLineageBean.WhereClause;
import com.google.gson.Gson;
/**
*
* This class extracts lineage info from a SQL query. It takes the SQL text as input and
* collects source tables, query columns, where clauses, group-by columns and create-table
* columns into a HiveLineageBean. Later we can expand to add join tables etc.
*
*/
public class HiveLineageInfo implements NodeProcessor {
private final Log LOG = LogFactory.getLog(HiveLineageInfo.class.getName());
public Map<Integer, String> queryMap;
public Integer counter = 0;
public HiveLineageBean hlb = new HiveLineageBean();
public ArrayList<SourceTables> sourceTables;
public ArrayList<QueryColumns> queryColumns;
public ArrayList<GroupBy> groupBy;
public ArrayList<WhereClause> whereClause;
public ArrayList<CreateColumns> createColumns;
/**
* @return Custom HiveLineageBean data to be passed to the GSON parser
*/
public HiveLineageBean getHLBean() {
return hlb;
}
/**
* Implements the process method for the NodeProcessor interface.
*/
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException {
ASTNode pt = (ASTNode) nd;
/*
* Dispatch on the token type of the current node and collect the matching lineage
* fragment: source tables, selected columns, where clause, group-by columns, or
* create-table metadata.
*/
switch (pt.getToken().getType()) {
case HiveParser.TOK_TABREF:
sourceTables = new ArrayList<SourceTables>();
LOG.debug("From Table Dump: "+pt.dump());
fromTableDump(pt);
this.hlb.setSourceTables(sourceTables);
break;
case HiveParser.TOK_SELECT:
queryColumns = new ArrayList<QueryColumns>();
LOG.debug("Column Dump: "+pt.dump());
columnTableDump(pt);
this.hlb.setQueryColumns(queryColumns);
break;
case HiveParser.TOK_WHERE:
whereClause = new ArrayList<WhereClause>();
LOG.debug("WHERE CLAUSE DUMP: "+pt.dump());
whereDump(pt);
this.hlb.setWhereClause(whereClause);
break;
case HiveParser.TOK_GROUPBY:
groupBy = new ArrayList<GroupBy>();
LOG.debug("GROUPBY CLAUSE DUMP: "+pt.dump());
groupByDump(pt);
this.hlb.setGroupBy(groupBy);
break;
case HiveParser.TOK_CREATETABLE:
createColumns = new ArrayList<CreateColumns>();
LOG.debug("CREATETABLE DUMP: "+pt.dump());
createTableDump(pt);
break;
}
return null;
}
/**
* Walks the whereTree called by processWalker
*/
public void whereDump(ASTNode nodeIn) {
counter = 0;
wdump(nodeIn);
}
/**
* Walks the Where Tree called by whereDump
*/
private void wdump(ASTNode nodeIn) {
boolean parseChild = true;
if (nodeIn.getType() == HiveParser.TOK_TABLE_OR_COL) {
WhereClause whreClse = hlb.new WhereClause();
if (nodeIn.getParent().getText().equalsIgnoreCase(".")) {
ASTNode checkOrAnd = (ASTNode) nodeIn.getParent().getParent().getChild(1).getParent().getParent();
if (checkOrAnd.getType() == HiveParser.KW_AND || checkOrAnd.getType() == HiveParser.KW_OR) {
LOG.info("WHERE:: "+checkOrAnd.getText());
whreClse.setColumnOperator(checkOrAnd.getText());
}
LOG.info("Table Alias:: "+nodeIn.getChild(0).getText());
whreClse.setTbAliasOrName(nodeIn.getChild(0).getText());
LOG.info("Delimiter:: "+nodeIn.getParent().getText());
LOG.info("Column:: "+nodeIn.getParent().getChild(1).getText());
whreClse.setColumnName(nodeIn.getParent().getChild(1).getText());
LOG.info("Column Qualifer:: "+nodeIn.getParent().getParent().getChild(1).getParent().getText());
whreClse.setColumnOperator(nodeIn.getParent().getParent().getChild(1).getParent().getText());
LOG.info("Column Value:: "+nodeIn.getParent().getParent().getChild(1).getText());
whreClse.setColumnValue(nodeIn.getParent().getParent().getChild(1).getText());
} else {
ASTNode checkOrAnd = (ASTNode) nodeIn.getParent().getParent().getChild(1).getParent();
if (checkOrAnd.getType() == HiveParser.KW_AND || checkOrAnd.getType() == HiveParser.KW_OR) {
LOG.info("WHERE:: "+checkOrAnd.getText());
whreClse.setColumnOperator(checkOrAnd.getText());
}
LOG.info("Column:: = "+nodeIn.getChild(0).getText());
whreClse.setColumnName(nodeIn.getChild(0).getText());
//LOG.info("Delimiter "+nodeIn.getParent().getText());
LOG.info("Column Qualifer:: "+nodeIn.getParent().getChild(1).getParent().getText());
whreClse.setColumnOperator(nodeIn.getParent().getChild(1).getParent().getText());
LOG.info("Column Value:: "+nodeIn.getParent().getChild(1).getText());
whreClse.setColumnValue(nodeIn.getParent().getChild(1).getText());
}
whereClause.add(whreClse);
}
if (parseChild) {
int childCount = nodeIn.getChildCount();
if (childCount != 0 ){
for (int numr = 0; numr < childCount; numr++) {
wdump((ASTNode)nodeIn.getChild(numr));
}
}
}
}
/**
* Walks the GroupByTree called by processWalker
*/
public void groupByDump(ASTNode nodeIn) {
counter = 0;
gdump(nodeIn);
}
/**
* Walks the GroupBy Tree called by groupByDump
*/
private void gdump(ASTNode nodeIn) {
boolean parseChild = true;
if (nodeIn.getType() == HiveParser.TOK_TABLE_OR_COL) {
GroupBy grpBy = hlb.new GroupBy();
ASTNode parentNode = (ASTNode) nodeIn.getParent();
if (parentNode.getText().equalsIgnoreCase(".")) {
LOG.info("GroupBy TableAlias: "+nodeIn.getChild(0).getText());
grpBy.setTbAliasOrName(nodeIn.getChild(0).getText());
LOG.info("GroupBy Column:: "+parentNode.getChild(1).getText());
grpBy.setColumnName(parentNode.getChild(1).getText());
} else {
LOG.info("GroupBy Column: "+nodeIn.getChild(0).getText());
grpBy.setColumnName(nodeIn.getChild(0).getText());
}
groupBy.add(grpBy);
}
if (parseChild) {
int childCount = nodeIn.getChildCount();
if (childCount != 0 ){
for (int numr = 0; numr < childCount; numr++) {
gdump((ASTNode)nodeIn.getChild(numr));
}
}
}
}
/**
* Walks the CreateTable Tree called by processWalker
*/
public void createTableDump(ASTNode nodeIn) {
counter = 0;
if (nodeIn.getFirstChildWithType(HiveParser.TOK_TABNAME) != null) {
LOG.info("Create TableName:: "+nodeIn.getFirstChildWithType(HiveParser.TOK_TABNAME).getText());
hlb.setTableName(nodeIn.getFirstChildWithType(HiveParser.TOK_TABNAME).getChild(0).getText());
}
if (nodeIn.getFirstChildWithType(HiveParser.TOK_TABLELOCATION) != null) {
LOG.info("Create Table Location:: "+nodeIn.getFirstChildWithType(HiveParser.TOK_TABLELOCATION).getText());
hlb.setTableLocation(nodeIn.getFirstChildWithType(HiveParser.TOK_TABLELOCATION).getChild(0).getText());
}
if (nodeIn.getFirstChildWithType(HiveParser.TOK_TABCOLLIST) != null ) {
ctdump((ASTNode)nodeIn.getFirstChildWithType(HiveParser.TOK_TABCOLLIST).getParent());
hlb.setCreateColumns(createColumns);
}
}
/**
* Walks the CreateTable Tree called by createTableDump
*/
private void ctdump(ASTNode nodeIn) {
boolean parseChild = true;
if (nodeIn.getType() == HiveParser.TOK_TABCOL) {
CreateColumns crtClmns = hlb.new CreateColumns();
LOG.info("Create Column Name:: "+nodeIn.getChild(0).getText());
crtClmns.setColumnName(nodeIn.getChild(0).getText());
LOG.info("Create Column Type:: "+nodeIn.getChild(1).getText());
crtClmns.setColumnType(nodeIn.getChild(1).getText());
createColumns.add(crtClmns);
}
if (parseChild) {
int childCount = nodeIn.getChildCount();
if (childCount != 0 ){
for (int numr = 0; numr < childCount; numr++) {
ctdump((ASTNode)nodeIn.getChild(numr));
}
}
}
}
/**
* Walks the fromTable Tree called by processWalker
*/
public void fromTableDump(ASTNode nodeIn) {
counter = 0;
ftdump(nodeIn);
}
/**
* Walks the fromTable Tree called by fromTableDump
*/
private void ftdump(ASTNode nodeIn) {
boolean parseChild = true;
if (nodeIn.getType() == HiveParser.TOK_TABNAME && nodeIn.getParent().getType() == HiveParser.TOK_TABREF) {
SourceTables hlbSbls = hlb.new SourceTables();
if (nodeIn.getChildCount() == 2) {
LOG.info("From DBName:: "+nodeIn.getChild(0).getText());
hlbSbls.setDatabaseName(nodeIn.getChild(0).getText());
LOG.info("From TableName:: "+nodeIn.getChild(1).getText());
hlbSbls.setTableName(nodeIn.getChild(1).getText());
} else {
LOG.info("From TableName:: "+nodeIn.getChild(0).getText());
hlbSbls.setTableName(nodeIn.getChild(0).getText());
}
if (nodeIn.getType() == HiveParser.TOK_TABNAME && nodeIn.getParent().getChild(1) != null) {
LOG.info("From DB/Table Alias:: "+nodeIn.getParent().getChild(1).getText());
hlbSbls.setTableAlias(nodeIn.getParent().getChild(1).getText());
}
sourceTables.add(hlbSbls);
}
if (parseChild) {
int childCount = nodeIn.getChildCount();
if (childCount != 0 ){
for (int numr = 0; numr < childCount; numr++) {
ftdump((ASTNode)nodeIn.getChild(numr));
}
}
}
}
/**
* Walks the column Tree called by processWalker
*/
public void columnTableDump(ASTNode nodeIn) {
counter = 0;
clmnTdump(nodeIn);
}
/**
* Walks the columnDump Tree called by columnTableDump
*/
private void clmnTdump(ASTNode nodeIn) {
boolean parseChild = true;
if (nodeIn.getType() == HiveParser.TOK_TABLE_OR_COL && nodeIn.getAncestor(HiveParser.TOK_SELEXPR) != null ) {
QueryColumns qclmns = hlb.new QueryColumns();
if (nodeIn.getAncestor(HiveParser.TOK_FUNCTION) != null && nodeIn.getAncestor(HiveParser.TOK_SELEXPR) != null) {
LOG.info("Function Query:: "+nodeIn.getAncestor(HiveParser.TOK_FUNCTION).getChild(0).getText());
qclmns.setColumnFunction(nodeIn.getAncestor(HiveParser.TOK_FUNCTION).getChild(0).getText());
}
if (nodeIn.getParent().getText().equalsIgnoreCase(".")) {
LOG.info("Table Name/Alias:: "+nodeIn.getChild(0).getText());
qclmns.setTbAliasOrName(nodeIn.getChild(0).getText());
LOG.info("Column:: "+nodeIn.getParent().getChild(1).getText());
qclmns.setColumnName(nodeIn.getParent().getChild(1).getText());
if (nodeIn.getAncestor(HiveParser.TOK_SELEXPR).getChild(1) != null) {
LOG.info("Column Alias:: "+nodeIn.getAncestor(HiveParser.TOK_SELEXPR).getChild(1).getText());
qclmns.setColumnAlias(nodeIn.getAncestor(HiveParser.TOK_SELEXPR).getChild(1).getText());
}
} else {
LOG.info("Column:: "+nodeIn.getChild(0).getText());
qclmns.setColumnName(nodeIn.getChild(0).getText());
if (nodeIn.getParent().getChild(1) != null) {
LOG.info("Column Alias:: "+nodeIn.getParent().getChild(1).getText());
qclmns.setColumnAlias(nodeIn.getParent().getChild(1).getText());
}
}
if (qclmns.getColumnName() != null) {
queryColumns.add(qclmns);
}
}
if (parseChild) {
int childCount = nodeIn.getChildCount();
if (childCount != 0 ){
for (int numr = 0; numr < childCount; numr++) {
clmnTdump((ASTNode)nodeIn.getChild(numr));
}
}
}
}
/**
* parses given query and gets the lineage info.
*
* @param query
* @throws ParseException
*/
public void getLineageInfo(String query) throws ParseException,
SemanticException {
/*
* Get the AST tree
*/
ParseDriver pd = new ParseDriver();
ASTNode tree = pd.parse(query);
LOG.info("DUMP TREE: "+tree.dump());
while ((tree.getToken() == null) && (tree.getChildCount() > 0)) {
tree = (ASTNode) tree.getChild(0);
}
/*
* initialize Event Processor and dispatcher.
*/
// create a walker which walks the tree in a DFS manner while maintaining
// the operator stack. The dispatcher
// generates the plan from the operator tree
Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
// The dispatcher fires the processor corresponding to the closest matching
// rule and passes the context along
Dispatcher disp = new DefaultRuleDispatcher(this, rules, null);
GraphWalker ogw = new DefaultGraphWalker(disp);
// Create a list of topop nodes
ArrayList<Node> topNodes = new ArrayList<Node>();
topNodes.add(tree);
ogw.startWalking(topNodes, null);
}
//Main method to run tests and return json/gson feed from a query
public static void main(String[] args) throws IOException, ParseException,
SemanticException {
String query = args[0];
ConsoleAppender console = new ConsoleAppender(); //create appender
//configure the appender
String PATTERN = "%d [%p|%c|%C{1}] %m%n";
console.setLayout(new PatternLayout(PATTERN));
console.setThreshold(Level.DEBUG);
console.activateOptions();
//add appender to any Logger (here is root)
Logger.getRootLogger().addAppender(console);
LogManager.getRootLogger().setLevel(Level.DEBUG);
HiveLineageInfo lep = new HiveLineageInfo();
lep.getLineageInfo(query);
Gson gson = new Gson();
String jsonOut = gson.toJson(lep.getHLBean());
System.out.println("GSON/JSON Generate :: "+jsonOut);
}
}
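For illustration, a minimal sketch of driving the walker programmatically, equivalent to what main() above does; the query string is a made-up example, not one taken from this commit:
HiveLineageInfo lineage = new HiveLineageInfo();
// Parse the statement and let the DFS walker populate the HiveLineageBean.
lineage.getLineageInfo("SELECT a.col1 FROM db1.tab1 a WHERE a.col2 = 'x'");
// Serialize the collected source tables, query columns, where clause and group-by info.
String json = new Gson().toJson(lineage.getHLBean());
System.out.println(json);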
package com.aetna.hadoop.dgc.hive;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.gson.Gson;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.exec.ExplainTask;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.util.StringUtils;
//import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
//import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
//import org.apache.hadoop.yarn.client.api.TimelineClient;
//import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
* DGC Hook sends query + plan info to DGCCollector Service. To enable (hadoop 2.4 and up) set
* hive.exec.pre.hooks/hive.exec.post.hooks/hive.exec.failure.hooks to include this class.
*/
public class Hook implements ExecuteWithHookContext {
private static final Log LOG = LogFactory.getLog(Hook.class.getName());
private HiveLineageBean hlb;
@Override
public void run(HookContext hookContext) throws Exception {
long currentTime = System.currentTimeMillis();
String executionEngine = "";  // default to empty so the execution-engine checks below cannot NPE when neither MR nor Tez tasks are present
try {
QueryPlan plan = hookContext.getQueryPlan();
if (plan == null) {
return;
}
ExplainTask explain = new ExplainTask();
explain.initialize(hookContext.getConf(), plan, null);
String queryId = plan.getQueryId();
String queryStartTime = plan.getQueryStartTime().toString();
String user = hookContext.getUgi().getUserName();
String query = plan.getQueryStr();
int numMrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
String hiveId = explain.getId();
SessionState sess = SessionState.get();
if (numTezJobs > 0) {
executionEngine="tez";
}
if (numMrJobs > 0) {
executionEngine="mr";
}
hiveId = sess.getSessionId();
switch(hookContext.getHookType()) {
case PRE_EXEC_HOOK:
Set<ReadEntity> db = hookContext.getInputs();
for (Object o : db) {
LOG.debug("DB:Table="+o.toString());
}
currentTime = System.currentTimeMillis();
HiveLineageInfo lep_pre = new HiveLineageInfo();
lep_pre.getLineageInfo(query);
hlb=lep_pre.getHLBean();
hlb.setQueryEndTime(Long.toString(currentTime));
hlb.setQueryId(queryId);
hlb.setQuery(query);
hlb.setUser(user);
hlb.setHiveId(hiveId);
hlb.setSuccess(false);
if (executionEngine.equalsIgnoreCase("mr")) {
hlb.setExecutionEngine("mapreduce");
}
if (executionEngine.equalsIgnoreCase("tez")) {
hlb.setExecutionEngine("tez");
}
if (executionEngine.equalsIgnoreCase("spark")) {
hlb.setExecutionEngine("spark");
}
hlb.setQueryStartTime(queryStartTime);
fireAndForget(hookContext.getConf(), hlb, queryId);
break;
case POST_EXEC_HOOK:
currentTime = System.currentTimeMillis();
HiveLineageInfo lep_post = new HiveLineageInfo();
lep_post.getLineageInfo(query);
hlb=lep_post.getHLBean();
hlb.setQueryEndTime(Long.toString(currentTime));
hlb.setQueryId(queryId);
hlb.setQuery(query);
hlb.setUser(user);
hlb.setQueryStartTime(queryStartTime);
hlb.setSuccess(true);
hlb.setHiveId(hiveId);
if (executionEngine.equalsIgnoreCase("mr")) {
hlb.setExecutionEngine("mapreduce");
}
if (executionEngine.equalsIgnoreCase("tez")) {
hlb.setExecutionEngine("tez");
}
if (executionEngine.equalsIgnoreCase("spark")) {
hlb.setExecutionEngine("spark");
}
fireAndForget(hookContext.getConf(), hlb, queryId);
break;
case ON_FAILURE_HOOK:
HiveLineageInfo lep_failed = new HiveLineageInfo();
lep_failed.getLineageInfo(query);
hlb=lep_failed.getHLBean();
hlb.setQueryEndTime(Long.toString(currentTime));
hlb.setQueryId(queryId);
hlb.setQuery(query);
hlb.setUser(user);
hlb.setQueryStartTime(queryStartTime);
hlb.setSuccess(false);
hlb.setFailed(true);
hlb.setHiveId(hiveId);
if (executionEngine.equalsIgnoreCase("mr")) {
hlb.setExecutionEngine("mapreduce");
}
if (executionEngine.equalsIgnoreCase("tez")) {
hlb.setExecutionEngine("tez");
}
if (executionEngine.equalsIgnoreCase("spark")) {
hlb.setExecutionEngine("spark");
}
fireAndForget(hookContext.getConf(), hlb, queryId);
break;
default:
//ignore
break;
}
} catch (Exception e) {
LOG.info("Failed to submit plan to DGC: " + StringUtils.stringifyException(e));
}
}
public void fireAndForget(Configuration conf, HiveLineageBean hookData, String queryId) throws Exception {
String postUri = "http://167.69.111.50:20810/HiveHookCollector/HookServlet";
if (conf.getTrimmed("aetna.hive.hook") != null) {
postUri = conf.getTrimmed("aetna.hive.hook");
}
Gson gson = new Gson();
String gsonString = gson.toJson(hookData);
LOG.debug("GSON String: "+gsonString);
String encodedGsonQuery = URLEncoder.encode(gsonString, "UTF-8");
String encodedQueryId = URLEncoder.encode(queryId, "UTF-8");
String postData = "hookdata=" + encodedGsonQuery+"&queryid="+encodedQueryId;
// Create a trust manager that does not validate certificate chains
if (postUri.contains("https:")) {
TrustManager[] trustAllCerts = new TrustManager[]{
new X509TrustManager() {
public java.security.cert.X509Certificate[] getAcceptedIssuers() {
return null;
}
public void checkClientTrusted(
java.security.cert.X509Certificate[] certs, String authType) {
}
public void checkServerTrusted(
java.security.cert.X509Certificate[] certs, String authType) {
}
}
};
// Install the all-trusting trust manager
try {
SSLContext sc = SSLContext.getInstance("SSL");
sc.init(null, trustAllCerts, new java.security.SecureRandom());
HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
} catch (Exception e) {
e.printStackTrace();
}
}
URL url = new URL(postUri);
LOG.debug("Post URI: "+postUri);
DataOutputStream wr = null;
//HttpURLConnection urlcon = null;
if (postUri.contains("https:")) {
HttpsURLConnection urlcon = null;
urlcon = (HttpsURLConnection)url.openConnection();
urlcon.setRequestMethod("POST");
urlcon.setRequestProperty("X-Requested-By", "HiveHook");
urlcon.setRequestProperty("Content-Type","application/x-www-form-urlencoded");
urlcon.setUseCaches(false);
urlcon.setDoInput(true);
urlcon.setDoOutput(true);
wr = new DataOutputStream (urlcon.getOutputStream());
LOG.debug("PostString: "+postData);
//wr.writeBytes(postString.);
wr.write(postData.getBytes());
wr.flush ();
wr.close ();
InputStream is = urlcon.getInputStream();
InputStreamReader isr = new InputStreamReader(is);
int numCharsRead;
char[] charArray = new char[1024];
StringBuffer sb = new StringBuffer();
while ((numCharsRead = isr.read(charArray)) > 0) {
sb.append(charArray, 0, numCharsRead);
}
String result = sb.toString();
LOG.debug("Post Response: "+result);
isr.close();
is.close();
urlcon.disconnect();
} else {
HttpURLConnection urlcon = null;
urlcon = (HttpURLConnection)url.openConnection();
urlcon.setRequestMethod("POST");
urlcon.setRequestProperty("X-Requested-By", "HiveHook");
urlcon.setRequestProperty("Content-Type","application/x-www-form-urlencoded");
urlcon.setUseCaches(false);
urlcon.setDoInput(true);
urlcon.setDoOutput(true);
wr = new DataOutputStream (urlcon.getOutputStream());
LOG.debug("PostString: "+postData);
//wr.writeBytes(postString.);
wr.write(postData.getBytes());
wr.flush ();
wr.close ();
InputStream is = urlcon.getInputStream();
InputStreamReader isr = new InputStreamReader(is);
int numCharsRead;
char[] charArray = new char[1024];
StringBuffer sb = new StringBuffer();
while ((numCharsRead = isr.read(charArray)) > 0) {
sb.append(charArray, 0, numCharsRead);
}
String result = sb.toString();
LOG.debug("Post Response: "+result);
isr.close();
is.close();
urlcon.disconnect();
}
}
}
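As a hedged sketch of how the hook might be enabled, using the hive.exec.* properties named in the class Javadoc and the aetna.hive.hook key read by fireAndForget(); the collector URL is a placeholder, not a value from this commit:
HiveConf conf = new HiveConf();
// Fire the hook before, after and on failure of each query.
conf.set("hive.exec.pre.hooks", "com.aetna.hadoop.dgc.hive.Hook");
conf.set("hive.exec.post.hooks", "com.aetna.hadoop.dgc.hive.Hook");
conf.set("hive.exec.failure.hooks", "com.aetna.hadoop.dgc.hive.Hook");
// Optional override of the collector endpoint; the hook POSTs
// "hookdata=<url-encoded JSON>&queryid=<query id>" to this URL.
conf.set("aetna.hive.hook", "http://dgc-collector.example.com:20810/HiveHookCollector/HookServlet");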
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-governance</artifactId>
<version>0.1-incubating-SNAPSHOT</version>
</parent>
<artifactId>metadata-repository</artifactId>
<description>Apache Metadata Repository Module</description>
<name>Apache Metadata Repository</name>
<packaging>jar</packaging>
<profiles>
<profile>
<id>hadoop-2</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<classifier>tests</classifier>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<classifier>tests</classifier>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
</dependencies>
</profile>
</profiles>
<dependencies>
<dependency>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-typesystem</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-throwingproviders</artifactId>
<version>3.0</version>
</dependency>
<dependency>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</dependency>
<dependency>
<groupId>com.googlecode.json-simple</groupId>
<artifactId>json-simple</artifactId>
</dependency>
<dependency>
<groupId>com.tinkerpop.blueprints</groupId>
<artifactId>blueprints-core</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-core</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-berkeleyje</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-es</artifactId>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.7</source>
<target>1.7</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>2.4</version>
<configuration>
<excludes>
<exclude>**/log4j.xml</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>
@@ -24,12 +24,49 @@
*/
package org.apache.hadoop.metadata;
import org.apache.hadoop.metadata.services.GraphBackedMetadataRepository;
import org.apache.hadoop.metadata.services.GraphProvider;
import org.apache.hadoop.metadata.services.GraphService;
import org.apache.hadoop.metadata.services.GraphServiceConfigurator;
import org.apache.hadoop.metadata.services.MetadataRepository;
import org.apache.hadoop.metadata.services.TitanGraphProvider;
import org.apache.hadoop.metadata.storage.IRepository;
import org.apache.hadoop.metadata.storage.memory.MemRepository;
import com.google.inject.Scopes;
import com.google.inject.throwingproviders.ThrowingProviderBinder;
import com.thinkaurelius.titan.core.TitanGraph;
/**
* Guice module for Repository module.
*/
public class RepositoryMetadataModule extends com.google.inject.AbstractModule {
// Graph Service implementation class
private Class<? extends GraphService> graphServiceClass;
// MetadataRepositoryService implementation class
private Class<? extends MetadataRepository> metadataRepoClass;
public RepositoryMetadataModule() {
GraphServiceConfigurator gsp = new GraphServiceConfigurator();
// get the impl classes for the repo and the graph service
this.graphServiceClass = gsp.getImplClass();
this.metadataRepoClass = GraphBackedMetadataRepository.class;
}
protected void configure() {
// special wiring for Titan Graph
ThrowingProviderBinder.create(binder())
.bind(GraphProvider.class, TitanGraph.class)
.to(TitanGraphProvider.class)
.in(Scopes.SINGLETON);
// allow for dynamic binding of the metadata repo & graph service
// bind the MetadataRepositoryService interface to an implementation
bind(MetadataRepository.class).to(metadataRepoClass);
// bind the GraphService interface to an implementation
bind(GraphService.class).to(graphServiceClass);
}
}
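A minimal bootstrap sketch for this module; the injector setup shown here is illustrative and not part of this commit:
Injector injector = Guice.createInjector(new RepositoryMetadataModule());
// MetadataRepository resolves to GraphBackedMetadataRepository,
// GraphService to whatever class GraphServiceConfigurator selected,
// and the TitanGraph is built by TitanGraphProvider as a singleton.
MetadataRepository repository = injector.getInstance(MetadataRepository.class);
GraphService graphService = injector.getInstance(GraphService.class);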
@@ -18,25 +18,31 @@
package org.apache.hadoop.metadata.services;
import java.io.IOException;
import java.util.List;
import javax.inject.Inject;
import org.apache.hadoop.metadata.ITypedReferenceableInstance;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.json.Serialization$;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.types.TypeSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DefaultMetadataService implements MetadataService {
private static final Logger LOG =
LoggerFactory.getLogger(DefaultMetadataService.class);
public static final String NAME = DefaultMetadataService.class.getSimpleName();
private final TypeSystem typeSystem;
private final MetadataRepository repository;
@Inject
DefaultMetadataService(MetadataRepository repository) throws MetadataException {
this.typeSystem = new TypeSystem();
this.repository = repository;
}
/**
* Creates a new type based on the type system to enable adding
@@ -84,7 +90,7 @@ public class DefaultMetadataService implements MetadataService {
String entityDefinition) throws MetadataException {
ITypedReferenceableInstance entityInstance =
Serialization$.MODULE$.fromJson(entityDefinition);
return repository.createEntity(entityInstance, entityType);
}
/**
@@ -124,35 +130,12 @@ public class DefaultMetadataService implements MetadataService {
}
/**
* Name of the service.
*
* @return name of the service
*/
@Override
public String getName() {
return NAME;
}
/**
* Starts the service. This method blocks until the service has completely started.
*
* @throws Exception
*/
@Override
public void start() throws Exception {
LOG.info("Initializing the Metadata service");
if (Services.get().isRegistered(TitanGraphService.NAME)) {
DefaultTypesService typesService = Services.get().getService(DefaultTypesService.NAME);
typeSystem = typesService.getTypeSystem();
} else {
throw new RuntimeException("Types service is not initialized");
}
if (Services.get().isRegistered(TitanGraphService.NAME)) {
repositoryService = Services.get().getService(GraphBackedMetadataRepository.NAME);
} else {
throw new RuntimeException("repository service is not initialized");
}
}
/**
@@ -160,8 +143,6 @@ public class DefaultMetadataService {
*/
@Override
public void stop() {
}
/**
......
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.services;
import org.apache.hadoop.metadata.types.TypeSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
public class DefaultTypesService implements TypesService {
private static final Logger LOG =
LoggerFactory.getLogger(DefaultTypesService.class);
public static final String NAME = DefaultTypesService.class.getSimpleName();
private TypeSystem typeSystem;
@Override
public TypeSystem getTypeSystem() {
assert typeSystem != null;
return typeSystem;
}
/**
* Name of the service.
*
* @return name of the service
*/
@Override
public String getName() {
return NAME;
}
/**
* Starts the service. This method blocks until the service has completely started.
*
* @throws Exception
*/
@Override
public void start() throws Exception {
LOG.info("Initializing the type system");
typeSystem = new TypeSystem();
}
/**
* Stops the service. This method blocks until the service has completely shut down.
*/
@Override
public void stop() {
}
/**
* A version of stop() that is designed to be usable in Java7 closure
* clauses.
* Implementation classes MUST relay this directly to {@link #stop()}
*
* @throws java.io.IOException never
* @throws RuntimeException on any failure during the stop operation
*/
@Override
public void close() throws IOException {
}
}
@@ -18,16 +18,23 @@
package org.apache.hadoop.metadata.services;
import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.TransactionalGraph;
import com.tinkerpop.blueprints.Vertex;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import javax.inject.Inject;
import org.apache.hadoop.metadata.IReferenceableInstance;
import org.apache.hadoop.metadata.ITypedInstance;
import org.apache.hadoop.metadata.ITypedReferenceableInstance;
import org.apache.hadoop.metadata.ITypedStruct;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.storage.Id;
import org.apache.hadoop.metadata.storage.MapIds;
import org.apache.hadoop.metadata.storage.RepositoryException;
@@ -41,18 +48,14 @@ import org.apache.hadoop.metadata.types.TypeSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An implementation backed by a Graph database provided
* as a Graph Service.
*/
public class GraphBackedMetadataRepository implements MetadataRepository {
@@ -69,19 +72,16 @@ public class GraphBackedMetadataRepository implements MetadataRepository {
private final AtomicInteger ID_SEQ = new AtomicInteger(0);
// private ConcurrentHashMap<String, ITypedReferenceableInstance> types;
private final ConcurrentHashMap<String, ITypedReferenceableInstance> instances;
private final GraphService graphService;
private final TypeSystem typeSystem;
@Inject
GraphBackedMetadataRepository(GraphService graphService) throws MetadataException {
this.instances = new ConcurrentHashMap<>();
this.graphService = graphService;
this.typeSystem = new TypeSystem();
}
/**
@@ -91,20 +91,6 @@ public class GraphBackedMetadataRepository {
*/
@Override
public void start() throws Exception {
}
/**
@@ -112,8 +98,6 @@ public class GraphBackedMetadataRepository implements MetadataRepository {
*/
@Override
public void stop() {
}
/**
@@ -129,16 +113,12 @@ public class GraphBackedMetadataRepository implements MetadataRepository {
stop();
}
@Override
public String createEntity(IReferenceableInstance entity,
String entityType) throws RepositoryException {
LOG.info("adding entity={} type={}", entity, entityType);
final TransactionalGraph transactionalGraph = graphService.getTransactionalGraph();
try {
// todo check if this is a duplicate
......
package org.apache.hadoop.metadata.services;
import org.apache.commons.configuration.ConfigurationException;
import com.google.inject.throwingproviders.CheckedProvider;
import com.tinkerpop.blueprints.Graph;
public interface GraphProvider<T extends Graph> extends CheckedProvider<T> {
@Override
T get() throws ConfigurationException;
}
@@ -18,12 +18,13 @@
package org.apache.hadoop.metadata.services;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.KeyIndexableGraph;
import com.tinkerpop.blueprints.TransactionalGraph;
import org.apache.hadoop.metadata.service.Service;
import java.util.Set;
/**
* A blueprints based graph service.
......
package org.apache.hadoop.metadata.services;
import com.thinkaurelius.titan.core.TitanGraph;
public class GraphServiceConfigurator extends PropertyBasedConfigurator<GraphService> {
private static final String PROPERTY_NAME = "metadata.graph.impl.class";
private static final String DEFAULT_IMPL_CLASS = TitanGraph.class.getName();
private static final String CONFIG_PATH = "application.properties";
public GraphServiceConfigurator() {
super("metadata.graph.propertyName", "metadata.graph.defaultImplClass",
"metadata.graph.configurationPath", PROPERTY_NAME,
DEFAULT_IMPL_CLASS, CONFIG_PATH);
}
}
package org.apache.hadoop.metadata.services;
import java.util.Properties;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
public abstract class PropertyBasedConfigurator<T> {
private final String propertyName;
private final String defaultImplClass;
private final String configurationPath;
PropertyBasedConfigurator(String propertyNameProp, String defaultImplClassProp,
String configurationPathProp, String propertyNameDefaultProp,
String defaultImplClassDefaultProp, String configPathDefaultProp) {
Properties props = System.getProperties();
this.propertyName = props.getProperty(propertyNameProp,
propertyNameDefaultProp);
this.defaultImplClass = props.getProperty(defaultImplClassProp,
defaultImplClassDefaultProp);
this.configurationPath = props.getProperty(configurationPathProp,
configPathDefaultProp);
}
PropertyBasedConfigurator(String propertyNameProp, String defaultImplClassProp,
String configurationPathProp) {
Properties props = System.getProperties();
this.propertyName = props.getProperty(propertyNameProp);
this.defaultImplClass = props.getProperty(defaultImplClassProp);
this.configurationPath = props.getProperty(configurationPathProp);
}
public String getPropertyName() {
return propertyName;
}
public String getDefaultImplClass() {
return defaultImplClass;
}
public String getConfigurationPath() {
return configurationPath;
}
public Configuration getConfiguration() {
String path = getConfigurationPath();
Configuration config = null;
try {
config = new PropertiesConfiguration(path);
} catch (ConfigurationException e) {
config = new PropertiesConfiguration();
}
return config;
}
public String getClassName() {
Configuration config = getConfiguration();
String propName = getPropertyName();
String defaultClass = getDefaultImplClass();
return config.getString(propName, defaultClass);
}
@SuppressWarnings("unchecked")
public Class<? extends T> getImplClass() {
String className = getClassName();
Class<? extends T> ret = null;
try {
ret = (Class<? extends T>) PropertyBasedConfigurator.class
.getClassLoader().loadClass(className);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
return ret;
}
}
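As a sketch, swapping the graph service implementation is a configuration change; the property name, default config file and system-property override below come from GraphServiceConfigurator and this class, while the class value shown is only an example:
// application.properties (or the file named by -Dmetadata.graph.configurationPath=...):
//   metadata.graph.impl.class=org.apache.hadoop.metadata.services.TitanGraphService
Class<? extends GraphService> implClass = new GraphServiceConfigurator().getImplClass();
// Falls back to the built-in default class when the property or the file is absent.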
package org.apache.hadoop.metadata.services;
import javax.inject.Singleton;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.TitanGraph;
public class TitanGraphProvider implements GraphProvider<TitanGraph> {
private static final String SYSTEM_PROP = "";
private static final String DEFAULT_PATH = "graph.properties";
private final String configPath;
public TitanGraphProvider() {
configPath = System.getProperties().getProperty(SYSTEM_PROP,
DEFAULT_PATH);
}
public Configuration getConfiguration() throws ConfigurationException {
return new PropertiesConfiguration(configPath);
}
@Override
@Singleton
public TitanGraph get() throws ConfigurationException {
TitanGraph graph = null;
Configuration config;
try {
config = getConfiguration();
} catch (ConfigurationException e) {
throw new RuntimeException(e);
}
graph = TitanFactory.open(config);
return graph;
}
}
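A hedged usage sketch for the provider; the graph.properties keys are typical Titan settings for the BerkeleyJE backend pulled in by the repository pom, not values taken from this commit:
// graph.properties (example content only):
//   storage.backend=berkeleyje
//   storage.directory=target/data/berkeley
TitanGraph graph = new TitanGraphProvider().get();  // throws ConfigurationException if the file cannot be read
// ... use the graph ...
graph.shutdown();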
@@ -18,7 +18,22 @@
package org.apache.hadoop.metadata.services;
import com.thinkaurelius.titan.core.TitanFactory;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.thinkaurelius.titan.core.TitanGraph;
import com.thinkaurelius.titan.core.schema.TitanGraphIndex;
import com.thinkaurelius.titan.core.schema.TitanManagement;
@@ -28,119 +43,84 @@ import com.tinkerpop.blueprints.KeyIndexableGraph;
import com.tinkerpop.blueprints.TransactionalGraph;
import com.tinkerpop.blueprints.Vertex;
/**
* Default implementation for Graph service backed by Titan.
*/
@Singleton
public class TitanGraphService implements GraphService {
private static Configuration getConfiguration(String filename, String prefix) throws ConfigurationException {
PropertiesConfiguration configProperties =
new PropertiesConfiguration(filename);
Configuration graphConfig = new PropertiesConfiguration();
final Iterator<String> iterator = configProperties.getKeys();
while (iterator.hasNext()) {
String key = iterator.next();
if (key.startsWith(prefix)) {
String value = (String) configProperties.getProperty(key);
key = key.substring(prefix.length());
graphConfig.setProperty(key, value);
}
}
return graphConfig;
}
protected TitanGraph initializeGraphDB(Configuration graphConfig) {
LOG.info("Initializing titanGraph db");
return TitanFactory.open(graphConfig);
}
protected void createIndicesForVertexKeys() throws ConfigurationException {
if (!titanGraph.getIndexedKeys(Vertex.class).isEmpty()) {
LOG.info("Indexes already exist for titanGraph");
return;
}
LOG.info("Indexes do not exist, Creating indexes for titanGraph using indexer.properties.");
private static final Logger LOG = LoggerFactory.getLogger(TitanGraphService.class);
/**
* Constant for the configuration property that indicates the prefix.
*/
private static final String INDEXER_PREFIX = "metadata.indexer.vertex.";
private static final List<String> acceptedTypes = Arrays.asList("String", "Int", "Long");
private final TitanGraph titanGraph;
/**
* Initialize this service through injection with a custom Provider.
*
* @param graph
* @throws ConfigurationException
*/
@Inject
TitanGraphService(GraphProvider<TitanGraph> graph) throws ConfigurationException {
// TODO reimplement to save the Provider and initialize the graph inside the start() method
this.titanGraph = graph.get();
//start();
}
/**
* Initializes this Service. The starting of Titan is handled by the Provider
* @throws ConfigurationException
*/
@Override
public void start() throws ConfigurationException {
createIndicesForVertexKeys();
// todo - create Edge Cardinality Constraints
LOG.info("Initialized titanGraph db: {}", titanGraph);
Set<String> vertexIndexedKeys = getVertexIndexedKeys();
LOG.info("Init vertex property keys: {}", vertexIndexedKeys);
Set<String> edgeIndexedKeys = getEdgeIndexedKeys();
LOG.info("Init edge property keys: {}", edgeIndexedKeys);
}
private static Configuration getConfiguration(String filename, String prefix)
throws ConfigurationException {
PropertiesConfiguration configProperties = new PropertiesConfiguration(
filename);
Configuration graphConfig = new PropertiesConfiguration();
final Iterator<String> iterator = configProperties.getKeys();
while (iterator.hasNext()) {
String key = iterator.next();
if (key.startsWith(prefix)) {
String value = (String) configProperties.getProperty(key);
key = key.substring(prefix.length());
graphConfig.setProperty(key, value);
}
}
return graphConfig;
}
/**
* Initializes the indices for the graph.
* @throws ConfigurationException
*/
// TODO move this functionality to the MetadataRepository?
protected void createIndicesForVertexKeys() throws ConfigurationException {
if (!titanGraph.getIndexedKeys(Vertex.class).isEmpty()) {
LOG.info("Indexes already exist for titanGraph");
return;
}
LOG.info("Indexes do not exist, Creating indexes for titanGraph using indexer.properties.");
TitanManagement mgmt = titanGraph.getManagementSystem();
mgmt.buildIndex("mainIndex", Vertex.class).buildMixedIndex("search");
......@@ -154,95 +134,110 @@ public class TitanGraphService implements GraphService {
if (!indexConfig.isEmpty()) {
// Get a list of property names to iterate through...
List<String> propList = new ArrayList<String>();
List<String> propList = new ArrayList<String>();
Iterator<String> it = indexConfig.getKeys("property.name");
while (it.hasNext()) {
propList.add(it.next());
}
it = propList.iterator();
while (it.hasNext()) {
// Pull the property name and index, so we can register the name and look up the type.
// Pull the property name and index, so we can register the name
// and look up the type.
String prop = it.next().toString();
String index = prop.substring(prop.lastIndexOf(".") + 1);
String type = null;
prop = indexConfig.getProperty(prop).toString();
// Look up the type for the specified property name.
if (indexConfig.containsKey("property.type." + index)) {
type = indexConfig.getProperty("property.type." + index).toString();
type = indexConfig.getProperty("property.type." + index)
.toString();
} else {
throw new ConfigurationException("No type specified for property " + index + " in indexer.properties.");
throw new ConfigurationException(
"No type specified for property " + index
+ " in indexer.properties.");
}
// Is the type submitted one of the approved ones?
// Is the type submitted one of the approved ones?
if (!acceptedTypes.contains(type)) {
throw new ConfigurationException("The type provided in indexer.properties for property " + prop + " is not supported. Supported types are: " + acceptedTypes.toString());
throw new ConfigurationException(
"The type provided in indexer.properties for property "
+ prop
+ " is not supported. Supported types are: "
+ acceptedTypes.toString());
}
// Add the key.
LOG.info("Adding property: " + prop + " to index as type: " + type);
mgmt.addIndexKey(graphIndex,mgmt.makePropertyKey(prop).dataType(type.getClass()).make());
}
LOG.info("Adding property: " + prop + " to index as type: "
+ type);
mgmt.addIndexKey(graphIndex, mgmt.makePropertyKey(prop)
.dataType(type.getClass()).make());
}
mgmt.commit();
LOG.info("Index creation complete.");
}
}
/**
* Stops the service. This method blocks until the service has completely shut down.
*/
@Override
public void stop() {
if (titanGraph != null) {
titanGraph.shutdown();
}
}
/**
* A version of stop() that is designed to be usable in Java7 closure
* clauses.
* Implementation classes MUST relay this directly to {@link #stop()}
*
* @throws java.io.IOException never
* @throws RuntimeException on any failure during the stop operation
*/
@Override
public void close() throws IOException {
stop();
}
@Override
public Graph getBlueprintsGraph() {
return titanGraph;
}
@Override
public KeyIndexableGraph getIndexableGraph() {
return titanGraph;
}
@Override
public TransactionalGraph getTransactionalGraph() {
return titanGraph;
}
public TitanGraph getTitanGraph() {
return titanGraph;
}
@Override
public Set<String> getVertexIndexedKeys() {
return vertexIndexedKeys;
}
@Override
public Set<String> getEdgeIndexedKeys() {
return edgeIndexedKeys;
}
}
/**
* Stops the service. This method blocks until the service has completely
* shut down.
*/
@Override
public void stop() {
if (titanGraph != null) {
titanGraph.shutdown();
}
}
/**
* A version of stop() that is designed to be usable in Java7 closure
* clauses. Implementation classes MUST relay this directly to
* {@link #stop()}
*
* @throws java.io.IOException
* never
* @throws RuntimeException
* on any failure during the stop operation
*/
@Override
public void close() throws IOException {
stop();
}
@Override
public Graph getBlueprintsGraph() {
return titanGraph;
}
@Override
public KeyIndexableGraph getIndexableGraph() {
return titanGraph;
}
@Override
public TransactionalGraph getTransactionalGraph() {
return titanGraph;
}
public TitanGraph getTitanGraph() {
return titanGraph;
}
@Override
public Set<String> getVertexIndexedKeys() {
// this must use the graph API instead of setting this value as a class member - it can change after creation
return getIndexableGraph().getIndexedKeys(Vertex.class);
}
@Override
public Set<String> getEdgeIndexedKeys() {
// this must use the graph API instead of setting this value as a class member - it can change after creation
return getIndexableGraph().getIndexedKeys(Edge.class);
}
}
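As a small aside, the getConfiguration(filename, prefix) helper above copies only the keys that start with the given prefix and strips that prefix, which is how metadata.graph.* entries in application.properties become the plain Titan keys (storage.backend, index.search.*) seen later in this change. A self-contained illustration of that behaviour, not part of the commit:

import java.util.Iterator;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.PropertiesConfiguration;

public class PrefixStripSketch {
    public static void main(String[] args) {
        Configuration source = new PropertiesConfiguration();
        source.setProperty("metadata.graph.storage.backend", "berkeleyje");
        source.setProperty("metadata.enableTLS", "false");        // no prefix match, not copied

        String prefix = "metadata.graph.";
        Configuration graphConfig = new PropertiesConfiguration();
        Iterator<String> keys = source.getKeys();
        while (keys.hasNext()) {
            String key = keys.next();
            if (key.startsWith(prefix)) {
                graphConfig.setProperty(key.substring(prefix.length()), source.getProperty(key));
            }
        }
        System.out.println(graphConfig.getString("storage.backend"));   // prints: berkeleyje
    }
}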
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.services;
import org.apache.hadoop.metadata.service.Service;
import org.apache.hadoop.metadata.types.TypeSystem;
public interface TypesService extends Service {
TypeSystem getTypeSystem();
}
package org.apache.hadoop.metadata;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
public abstract class GuiceEnabledTestBase {
/*
* Guice.createInjector() takes your Modules, and returns a new Injector
* instance. Most applications will call this method exactly once, in their
* main() method.
*/
public final Injector injector;
GuiceEnabledTestBase() {
injector = Guice.createInjector();
}
GuiceEnabledTestBase(Module... modules) {
injector = Guice.createInjector(modules);
}
}
package org.apache.hadoop.metadata;
public abstract class RepositoryModuleBaseTest extends GuiceEnabledTestBase {
public RepositoryModuleBaseTest() {
super(new RepositoryMetadataModule());
}
}
package org.apache.hadoop.metadata;
import junit.framework.Assert;
import org.apache.hadoop.metadata.services.GraphService;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
/**
* Unit test for Guice injector service loading
*/
public class RepositoryServiceLoadingTest extends GuiceEnabledTestBase {
public RepositoryServiceLoadingTest() {
super(new RepositoryMetadataModule());
}
@BeforeClass
public void setUp() throws Exception {
}
@AfterClass
public void tearDown() throws Exception {
}
@Test
public void testGetGraphService() throws Exception {
/*
* Now that we've got the injector, we can build objects.
*/
GraphService gs = injector.getInstance(GraphService.class);
Assert.assertNotNull(gs);
}
}
package org.apache.hadoop.metadata.services;
import com.google.common.collect.ImmutableList;
import com.thinkaurelius.titan.core.TitanGraph;
import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Vertex;
import java.util.List;
import org.apache.hadoop.metadata.ITypedReferenceableInstance;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.MetadataService;
import org.apache.hadoop.metadata.Referenceable;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.RepositoryModuleBaseTest;
import org.apache.hadoop.metadata.storage.IRepository;
import org.apache.hadoop.metadata.storage.memory.MemRepository;
import org.apache.hadoop.metadata.types.AttributeDefinition;
import org.apache.hadoop.metadata.types.ClassType;
......@@ -27,59 +24,44 @@ import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.thinkaurelius.titan.core.TitanGraph;
import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Vertex;
public class GraphBackedMetadataRepositoryTest {
public class GraphBackedMetadataRepositoryTest extends RepositoryModuleBaseTest {
private static final String ENTITY_TYPE = "hive-table";
private TitanGraphService titanGraphService;
private GraphBackedMetadataRepository repositoryService;
protected org.apache.hadoop.metadata.MetadataService ms;
private IRepository repo;
private TypeSystem ts;
private String guid;
@BeforeClass
public void setUp() throws Exception {
titanGraphService = new TitanGraphService();
titanGraphService = super.injector.getInstance(TitanGraphService.class);
titanGraphService.start();
Services.get().register(titanGraphService);
DefaultTypesService typesService = new DefaultTypesService();
typesService.start();
Services.get().register(typesService);
TypeSystem ts = typesService.getTypeSystem();
repositoryService = new GraphBackedMetadataRepository();
repositoryService = super.injector.getInstance(GraphBackedMetadataRepository.class);
repositoryService.start();
Services.get().register(repositoryService);
// todo - only used for types
MemRepository mr = new MemRepository(ts);
ms = new org.apache.hadoop.metadata.MetadataService(mr, ts);
MetadataService.setCurrentService(ms);
ts = new TypeSystem();
repo = new MemRepository(ts);
defineDeptEmployeeTypes(ts);
}
@AfterClass
public void tearDown() throws Exception {
Services.get().getService(GraphBackedMetadataRepository.NAME).close();
Services.get().getService(TitanGraphService.NAME).close();
Services.get().reset();
}
@Test
public void testGetName() throws Exception {
Assert.assertEquals(GraphBackedMetadataRepository.NAME,
GraphBackedMetadataRepository.class.getSimpleName());
Assert.assertEquals(repositoryService.getName(), GraphBackedMetadataRepository.NAME);
}
@Test
public void testSubmitEntity() throws Exception {
TypeSystem typeSystem = MetadataService.getCurrentService().getTypeSystem();
Referenceable hrDept = createDeptEg1(typeSystem);
ClassType deptType = typeSystem.getDataType(ClassType.class, "Department");
Referenceable hrDept = createDeptEg1(ts);
ClassType deptType = ts.getDataType(ClassType.class, "Department");
ITypedReferenceableInstance hrDept2 = deptType.convert(hrDept, Multiplicity.REQUIRED);
guid = repositoryService.createEntity(hrDept2, ENTITY_TYPE);
......@@ -118,20 +100,6 @@ public class GraphBackedMetadataRepositoryTest {
Assert.assertEquals(entityList.size(), 0); // as this is not implemented yet
}
@Test(expectedExceptions = RuntimeException.class)
public void testStartWithOutGraphServiceRegistration() throws Exception {
try {
Services.get().reset();
GraphBackedMetadataRepository repositoryService = new
GraphBackedMetadataRepository();
repositoryService.start();
Assert.fail("This should have thrown an exception");
} finally {
Services.get().register(titanGraphService);
Services.get().register(repositoryService);
}
}
/*
* Class Hierarchy is:
* Department(name : String, employees : Array[Person])
......@@ -184,7 +152,7 @@ public class GraphBackedMetadataRepositoryTest {
ts.getDataType(ClassType.class, "Manager")
);
ms.getRepository().defineTypes(types);
repo.defineTypes(types);
}
protected Referenceable createDeptEg1(TypeSystem ts) throws MetadataException {
......@@ -228,4 +196,4 @@ public class GraphBackedMetadataRepositoryTest {
String name, ImmutableList<String> superTypes, AttributeDefinition... attrDefs) {
return new HierarchicalTypeDefinition(ClassType.class, name, superTypes, attrDefs);
}
}
}
\ No newline at end of file
package org.apache.hadoop.metadata.services;
import org.apache.hadoop.metadata.RepositoryModuleBaseTest;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
......@@ -8,13 +9,14 @@ import org.testng.annotations.Test;
/**
* Unit test for TitanGraphService.
*/
public class TitanGraphServiceTest {
public class TitanGraphServiceTest extends RepositoryModuleBaseTest {
private TitanGraphService titanGraphService;
@BeforeClass
public void setUp() throws Exception {
titanGraphService = new TitanGraphService();
titanGraphService = super.injector.getInstance(TitanGraphService.class);
//titanGraphService = new TitanGraphService();
titanGraphService.start();
}
......@@ -24,12 +26,6 @@ public class TitanGraphServiceTest {
}
@Test
public void testGetName() throws Exception {
Assert.assertEquals(TitanGraphService.NAME, TitanGraphService.class.getSimpleName());
Assert.assertEquals(titanGraphService.getName(), TitanGraphService.NAME);
}
@Test
public void testStart() throws Exception {
Assert.assertNotNull(titanGraphService.getBlueprintsGraph());
}
......@@ -65,4 +61,4 @@ public class TitanGraphServiceTest {
Assert.assertNotNull(titanGraphService.getEdgeIndexedKeys());
Assert.assertTrue(titanGraphService.getEdgeIndexedKeys().size() > 0);
}
}
\ No newline at end of file
}
......@@ -16,21 +16,7 @@
# limitations under the License.
#
application.services=org.apache.hadoop.metadata.services.TitanGraphService,\
org.apache.hadoop.metadata.services.GraphBackedMetadataRepositoryService
# Graph implementation
#metadata.graph.blueprints.graph=com.thinkaurelius.titan.core.TitanFactory
# Graph Storage
metadata.graph.storage.backend=berkeleyje
metadata.graph.storage.directory=target/data/berkeley
# Graph Search Index
metadata.graph.index.search.backend=elasticsearch
metadata.graph.index.search.directory=target/data/es
metadata.graph.index.search.elasticsearch.client-only=false
metadata.graph.index.search.elasticsearch.local-mode=true
# GraphService implementation
metadata.graph.impl.class=org.apache.hadoop.metadata.services.TitanGraphService
metadata.enableTLS=false
storage.backend=inmemory
# Graph Search Index
index.search.backend=elasticsearch
index.search.directory=target/data/es
index.search.elasticsearch.client-only=false
index.search.elasticsearch.local-mode=true
\ No newline at end of file
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is formatted as follows:
# metadata.indexer.vertex.property.name.<index>=<Property Name>
# metadata.indexer.vertex.property.type.<index>=<Data Type>
metadata.indexer.vertex.property.name.0=DESC
metadata.indexer.vertex.property.type.0=String
metadata.indexer.vertex.property.name.1=DB_LOCATION_URI
metadata.indexer.vertex.property.type.1=String
metadata.indexer.vertex.property.name.2=NAME
metadata.indexer.vertex.property.type.2=String
metadata.indexer.vertex.property.name.3=OWNER_NAME
metadata.indexer.vertex.property.type.3=String
metadata.indexer.vertex.property.name.4=TBL_NAME
metadata.indexer.vertex.property.type.4=String
metadata.indexer.vertex.property.name.5=COMMENT
metadata.indexer.vertex.property.type.5=String
metadata.indexer.vertex.property.name.6=COLUMN_NAME
metadata.indexer.vertex.property.type.6=String
metadata.indexer.vertex.property.name.7=TYPE_NAME
metadata.indexer.vertex.property.type.7=String
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is formatted as follows:
# metadata.indexer.vertex.property.name.<index>=<Property Name>
# metadata.indexer.vertex.property.type.<index>=<Data Type>
metadata.indexer.vertex.property.name.0=DESC
metadata.indexer.vertex.property.type.0=String
metadata.indexer.vertex.property.name.1=DB_LOCATION_URI
metadata.indexer.vertex.property.type.1=String
metadata.indexer.vertex.property.name.2=NAME
metadata.indexer.vertex.property.type.2=String
metadata.indexer.vertex.property.name.3=OWNER_NAME
metadata.indexer.vertex.property.type.3=String
metadata.indexer.vertex.property.name.4=TBL_NAME
metadata.indexer.vertex.property.type.4=String
metadata.indexer.vertex.property.name.5=COMMENT
metadata.indexer.vertex.property.type.5=String
metadata.indexer.vertex.property.name.6=COLUMN_NAME
metadata.indexer.vertex.property.type.6=String
metadata.indexer.vertex.property.name.7=TYPE_NAME
metadata.indexer.vertex.property.type.7=String
......@@ -22,6 +22,7 @@ package org.apache.hadoop.metadata;
import org.apache.hadoop.metadata.storage.IRepository;
import org.apache.hadoop.metadata.types.TypeSystem;
// TODO get rid of this class in favor of Dependency injection
public class MetadataService {
final IRepository repo;
......
......@@ -127,6 +127,21 @@
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-servlet</artifactId>
<version>3.0</version>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.3.1</version>
</dependency>
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
<version>1.18.3</version>
</dependency>
</dependencies>
<build>
......@@ -196,15 +211,15 @@
<artifactId>keytool-maven-plugin</artifactId>
<executions>
<execution>
<id>clean</id>
<phase>generate-resources</phase>
<id>clean-server</id>
<goals>
<goal>clean</goal>
</goals>
</execution>
<execution>
<id>genkey</id>
<phase>generate-resources</phase>
<id>server</id>
<goals>
<goal>generateKeyPair</goal>
</goals>
......@@ -241,6 +256,7 @@
</connector>
</connectors>
<webApp>${project.build.directory}/metadata-webapp-${project.version}</webApp>
<contextPath>/</contextPath>
<useTestClasspath>true</useTestClasspath>
<systemProperties>
<systemProperty>
......
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.listeners;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.service.ServiceInitializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
/**
* Listener for bootstrapping Services and configuration properties.
*/
public class ApplicationStartupListener implements ServletContextListener {
private static final Logger LOG = LoggerFactory.getLogger(ApplicationStartupListener.class);
private final ServiceInitializer startupServices = new ServiceInitializer();
@Override
public void contextInitialized(ServletContextEvent sce) {
try {
startupServices.initialize();
showStartupInfo();
} catch (MetadataException e) {
throw new RuntimeException("Error starting services", e);
}
}
private void showStartupInfo() {
StringBuilder buffer = new StringBuilder();
buffer.append("\n############################################");
buffer.append("\n Metadata Server (STARTED) ");
buffer.append("\n############################################");
try {
PropertiesConfiguration configuration = new PropertiesConfiguration("application.properties");
buffer.append(configuration.toString());
} catch (ConfigurationException e) {
buffer.append("*** Unable to get build info ***").append(e.getMessage());
}
LOG.info(buffer.toString());
}
@Override
public void contextDestroyed(ServletContextEvent sce) {
try {
startupServices.destroy();
} catch (MetadataException e) {
LOG.warn("Error destroying services", e);
}
StringBuilder buffer = new StringBuilder();
buffer.append("\n############################################");
buffer.append("\n Metadata Server (SHUTDOWN) ");
buffer.append("\n############################################");
LOG.info(buffer.toString());
}
}
\ No newline at end of file
package org.apache.hadoop.metadata.web.listeners;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.metadata.RepositoryMetadataModule;
import org.apache.hadoop.metadata.services.GraphService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.JerseyServletModule;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
public class GuiceServletConfig extends GuiceServletContextListener {
private static final Logger LOG = LoggerFactory
.getLogger(GuiceServletConfig.class);
private static final String GUICE_CTX_PARAM = "guice.packages";
@Override
protected Injector getInjector() {
LOG.info("Loading Guice modules");
/*
* More information on this can be found here:
* https://jersey.java.net/nonav/apidocs/1.11/contribs/jersey-guice/com/sun/jersey/guice/spi/container/servlet/package-summary.html
*/
Injector injector = Guice.createInjector(
new RepositoryMetadataModule(),
new JerseyServletModule() {
@Override
protected void configureServlets() {
String packages = getServletContext().getInitParameter(GUICE_CTX_PARAM);
LOG.info("Jersey loading from packages: " + packages);
Map<String, String> params = new HashMap<String, String>();
params.put(PackagesResourceConfig.PROPERTY_PACKAGES, packages);
serve("/api/metadata/*").with(GuiceContainer.class, params);
}
});
LOG.info("Guice modules loaded");
LOG.info("Bootstrapping services");
// get the Graph Service
GraphService graphService = injector.getInstance(GraphService.class);
try {
// start/init the service
graphService.start();
} catch (Exception e) {
throw new RuntimeException(e);
}
LOG.info(String.format("Loaded Service: %s", graphService.getClass().getName()));
LOG.info("Services bootstrapped successfully");
return injector;
}
}
......@@ -23,6 +23,7 @@ import org.apache.hadoop.metadata.web.util.Servlets;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import javax.inject.Singleton;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
......@@ -34,6 +35,7 @@ import javax.ws.rs.core.Response;
* Jersey Resource for admin operations.
*/
@Path("admin")
@Singleton
public class AdminResource {
@GET
......
......@@ -18,19 +18,11 @@
package org.apache.hadoop.metadata.web.resources;
import com.google.common.base.Preconditions;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.services.DefaultMetadataService;
import org.apache.hadoop.metadata.services.MetadataService;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.codehaus.jettison.json.JSONObject;
import org.json.simple.JSONValue;
import org.json.simple.parser.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.StringWriter;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
......@@ -45,8 +37,18 @@ import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.StringWriter;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.services.MetadataService;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.codehaus.jettison.json.JSONObject;
import org.json.simple.JSONValue;
import org.json.simple.parser.ParseException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Preconditions;
/**
* Entity management operations as REST API.
......@@ -55,17 +57,22 @@ import java.io.StringWriter;
* of the Type they correspond with.
*/
@Path("entities")
@Singleton
public class EntityResource {
private static final Logger LOG = LoggerFactory.getLogger(EntityResource.class);
private MetadataService metadataService;
public EntityResource() {
metadataService = Services.get().getService(DefaultMetadataService.NAME);
if (metadataService == null) {
throw new RuntimeException("graph service is not initialized");
}
private final MetadataService metadataService;
/**
* Created by the Guice ServletModule and injected with the
* configured MetadataService.
*
* @param metadataService
*/
@Inject
public EntityResource(MetadataService metadataService) {
this.metadataService = metadataService;
}
@POST
......
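The @Inject constructor above presumes a Guice binding for MetadataService. That binding would belong in RepositoryMetadataModule, which is not part of this diff; a minimal sketch, assuming DefaultMetadataService is still the implementation and that the module class name below is purely illustrative:

package org.apache.hadoop.metadata;

import com.google.inject.AbstractModule;
import com.google.inject.Scopes;
import org.apache.hadoop.metadata.services.DefaultMetadataService;
import org.apache.hadoop.metadata.services.MetadataService;

// hypothetical fragment showing the one binding EntityResource depends on
public class ExampleWebBindingsSketch extends AbstractModule {
    @Override
    protected void configure() {
        bind(MetadataService.class).to(DefaultMetadataService.class).in(Scopes.SINGLETON);
    }
}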
......@@ -18,6 +18,7 @@
package org.apache.hadoop.metadata.web.resources;
import javax.inject.Singleton;
import javax.ws.rs.Path;
/**
......@@ -30,5 +31,6 @@ import javax.ws.rs.Path;
* 'search': find entities generated by Hive processes or that were generated by Sqoop, etc.
*/
@Path("discovery")
@Singleton
public class MetadataDiscoveryResource {
}
......@@ -18,25 +18,12 @@
package org.apache.hadoop.metadata.web.resources;
import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Element;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.blueprints.VertexQuery;
import com.tinkerpop.blueprints.util.io.graphson.GraphSONMode;
import com.tinkerpop.blueprints.util.io.graphson.GraphSONUtility;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.services.GraphService;
import org.apache.hadoop.metadata.services.TitanGraphService;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import javax.inject.Inject;
import javax.inject.Singleton;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
......@@ -46,9 +33,24 @@ import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.metadata.services.GraphService;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.tinkerpop.blueprints.Direction;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Element;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.Vertex;
import com.tinkerpop.blueprints.VertexQuery;
import com.tinkerpop.blueprints.util.io.graphson.GraphSONMode;
import com.tinkerpop.blueprints.util.io.graphson.GraphSONUtility;
/**
* Jersey Resource for lineage metadata operations.
......@@ -59,22 +61,25 @@ import java.util.Set;
* for accessing the backend graph.
*/
@Path("graph")
@Singleton
public class RexsterGraphResource {
private static final Logger LOG = LoggerFactory.getLogger(RexsterGraphResource.class);
public static final String RESULTS = "results";
public static final String TOTAL_SIZE = "totalSize";
private GraphService graphService;
private final GraphService graphService;
public RexsterGraphResource() {
graphService = Services.get().getService(TitanGraphService.NAME);
@Inject
public RexsterGraphResource(GraphService graphService) {
this.graphService = graphService;
/*graphService = Services.get().getService(TitanGraphService.NAME);
if (graphService == null) {
throw new WebApplicationException(Response
.status(Response.Status.INTERNAL_SERVER_ERROR)
.tag("graph service is not initialized")
.build());
}
}*/
}
protected Graph getGraph() {
......
......@@ -18,6 +18,7 @@
package org.apache.hadoop.metadata.web.resources;
import javax.inject.Singleton;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
......@@ -31,6 +32,7 @@ import javax.ws.rs.core.Response;
* e.g. a Hive table
*/
@Path("types")
@Singleton
public class TypesResource {
@POST
......
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!-- ~ Licensed to the Apache Software Foundation (ASF) under one ~ or more
contributor license agreements. See the NOTICE file ~ distributed with this
work for additional information ~ regarding copyright ownership. The ASF
licenses this file ~ to you under the Apache License, Version 2.0 (the ~
"License"); you may not use this file except in compliance ~ with the License.
You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0
~ ~ Unless required by applicable law or agreed to in writing, software ~
distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the
License for the specific language governing permissions and ~ limitations
under the License. -->
<!DOCTYPE web-app PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
"http://java.sun.com/dtd/web-app_2_3.dtd">
<web-app>
<display-name>Apache Falcon Placeholder</display-name>
<description>Apache Falcon Placeholder</description>
<filter>
<filter-name>audit</filter-name>
<filter-class>org.apache.hadoop.metadata.web.filters.AuditFilter</filter-class>
</filter>
<filter>
<filter-name>authentication</filter-name>
<filter-class>org.apache.hadoop.metadata.web.filters.AuthenticationFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>audit</filter-name>
<servlet-name>MetadataRESTApi</servlet-name>
</filter-mapping>
<filter-mapping>
<filter-name>authentication</filter-name>
<servlet-name>MetadataRESTApi</servlet-name>
</filter-mapping>
<listener>
<listener-class>org.apache.hadoop.metadata.web.listeners.ApplicationStartupListener</listener-class>
</listener>
<servlet>
<servlet-name>MetadataRESTApi</servlet-name>
<servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
<init-param>
<param-name>com.sun.jersey.config.property.resourceConfigClass</param-name>
<param-value>com.sun.jersey.api.core.PackagesResourceConfig</param-value>
</init-param>
<init-param>
<param-name>com.sun.jersey.config.property.packages</param-name>
<param-value>
org.apache.hadoop.metadata.web.resources,org.apache.hadoop.metadata.web.params
</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>MetadataRESTApi</servlet-name>
<url-pattern>/api/metadata/*</url-pattern>
</servlet-mapping>
<display-name>Apache Metadata Placeholder</display-name>
<description>Apache Metadata Placeholder</description>
<context-param>
<param-name>guice.packages</param-name>
<param-value>org.apache.hadoop.metadata.web.resources,org.apache.hadoop.metadata.web.params</param-value>
</context-param>
<!--
More information can be found here:
https://jersey.java.net/nonav/apidocs/1.11/contribs/jersey-guice/com/sun/jersey/guice/spi/container/servlet/package-summary.html
-->
<filter>
<filter-name>guiceFilter</filter-name>
<filter-class>com.google.inject.servlet.GuiceFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>guiceFilter</filter-name>
<url-pattern>/*</url-pattern>
</filter-mapping>
<listener>
<listener-class>org.apache.hadoop.metadata.web.listeners.ApplicationStartupListener</listener-class>
</listener>
<listener>
<listener-class>org.apache.hadoop.metadata.web.listeners.GuiceServletConfig</listener-class>
</listener>
</web-app>
package org.apache.hadoop.metadata;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.services.GraphBackedMetadataRepository;
import org.apache.hadoop.metadata.services.TitanGraphService;
import org.json.simple.JSONValue;
import org.testng.Assert;
import org.testng.annotations.AfterClass;
import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;
import java.util.HashMap;
import java.util.Map;
public class GraphRepositoryServiceIT {
private static final String ENTITY_NAME = "clicks-table";
private static final String ENTITY_TYPE = "hive-table";
private static final String DATABASE_NAME = "ads";
private static final String TABLE_NAME = "clicks-table";
@BeforeClass
public void setUp() throws Exception {
TitanGraphService titanGraphService = new TitanGraphService();
titanGraphService.start();
Services.get().register(titanGraphService);
GraphBackedMetadataRepository repositoryService
= new GraphBackedMetadataRepository();
repositoryService.start();
Services.get().register(repositoryService);
}
@AfterClass
public void tearDown() throws Exception {
Services.get().getService(GraphBackedMetadataRepository.NAME).close();
Services.get().getService(TitanGraphService.NAME).close();
Services.get().reset();
}
/*
@Test
public void testRepository() throws Exception {
GraphBackedMetadataRepositoryService repositoryService =
Services.get().getService(GraphBackedMetadataRepositoryService.NAME);
String entityStream = getTestEntityJSON();
String guid = repositoryService.createEntity(entityStream, ENTITY_TYPE);
Assert.assertNotNull(guid);
String entity = repositoryService.getEntityDefinition(ENTITY_NAME, ENTITY_TYPE);
@SuppressWarnings("unchecked")
Map<String, String> entityProperties =
(Map<String, String>) JSONValue.parseWithException(entity);
Assert.assertEquals(entityProperties.get("guid"), guid);
Assert.assertEquals(entityProperties.get("entityName"), ENTITY_NAME);
Assert.assertEquals(entityProperties.get("entityType"), ENTITY_TYPE);
Assert.assertEquals(entityProperties.get("database"), DATABASE_NAME);
Assert.assertEquals(entityProperties.get("table"), TABLE_NAME);
}
private String getTestEntityJSON() {
Map<String, String> props = new HashMap<>();
props.put("entityName", ENTITY_NAME);
props.put("entityType", ENTITY_TYPE);
props.put("database", DATABASE_NAME);
props.put("table", TABLE_NAME);
return JSONValue.toJSONString(props);
}
*/
}
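Note that this integration test still wires TitanGraphService and GraphBackedMetadataRepository by hand through the Services registry. Under the Guice wiring introduced elsewhere in this change, an equivalent bootstrap would presumably mirror the unit tests; roughly, as a sketch only:

package org.apache.hadoop.metadata;

import com.google.inject.Guice;
import com.google.inject.Injector;
import org.apache.hadoop.metadata.services.GraphBackedMetadataRepository;
import org.apache.hadoop.metadata.services.TitanGraphService;

public class GuiceBootstrapSketch {
    public static void main(String[] args) throws Exception {
        // mirrors GraphBackedMetadataRepositoryTest#setUp above
        Injector injector = Guice.createInjector(new RepositoryMetadataModule());
        TitanGraphService graphService = injector.getInstance(TitanGraphService.class);
        graphService.start();
        GraphBackedMetadataRepository repository =
                injector.getInstance(GraphBackedMetadataRepository.class);
        repository.start();
    }
}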
package org.apache.hadoop.metadata;
import com.tinkerpop.blueprints.GraphQuery;
import com.tinkerpop.blueprints.TransactionalGraph;
import com.tinkerpop.blueprints.Vertex;
import org.apache.hadoop.metadata.services.TitanGraphService;
import org.apache.hadoop.metadata.util.GraphUtils;
import org.testng.annotations.Test;
import java.util.Iterator;
import java.util.UUID;
/**
* End to end graph put/get test.
*/
public class TitanGraphServiceIT {
@Test
public void testTitanGraph() throws Exception {
TitanGraphService titanGraphService = new TitanGraphService();
titanGraphService.start();
try {
String guid = UUID.randomUUID().toString();
final TransactionalGraph graph = titanGraphService.getTransactionalGraph();
System.out.println("graph = " + graph);
System.out.println("graph.getVertices() = " + graph.getVertices());
Vertex entityVertex = null;
try {
graph.rollback();
entityVertex = graph.addVertex(null);
entityVertex.setProperty("guid", guid);
entityVertex.setProperty("entityName", "entityName");
entityVertex.setProperty("entityType", "entityType");
} catch (Exception e) {
graph.rollback();
e.printStackTrace();
} finally {
graph.commit();
}
System.out.println("vertex = " + GraphUtils.vertexString(entityVertex));
GraphQuery query = graph.query()
.has("entityName", "entityName")
.has("entityType", "entityType");
Iterator<Vertex> results = query.vertices().iterator();
if (results.hasNext()) {
Vertex vertexFromQuery = results.next();
System.out.println("vertex = " + GraphUtils.vertexString(vertexFromQuery));
}
} finally {
Thread.sleep(1000);
titanGraphService.stop();
}
}
}
......@@ -18,18 +18,22 @@
package org.apache.hadoop.metadata.web.resources;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import org.json.simple.JSONValue;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.json.simple.JSONValue;
import org.testng.Assert;
import org.testng.annotations.Test;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
/**
* Integration tests for Entity Jersey Resource.
......@@ -44,6 +48,7 @@ public class EntityJerseyResourceIT extends BaseResourceIT {
@Test
public void testSubmitEntity() {
String entityStream = getTestEntityJSON();
JsonParser parser = new JsonParser();
WebResource resource = service
.path("api/metadata/entities/submit")
......@@ -54,11 +59,15 @@ public class EntityJerseyResourceIT extends BaseResourceIT {
.type(MediaType.APPLICATION_JSON)
.method(HttpMethod.POST, ClientResponse.class, entityStream);
Assert.assertEquals(clientResponse.getStatus(), Response.Status.OK.getStatusCode());
String response = clientResponse.getEntity(String.class);
Assert.assertNotNull(response);
JsonElement elem = parser.parse(response);
String guid = elem.getAsJsonObject().get("GUID").getAsString();
try {
Assert.assertNotNull(UUID.fromString(response));
Assert.assertNotNull(UUID.fromString(guid));
} catch (IllegalArgumentException e) {
Assert.fail("Response is not a guid, " + response);
}
......
......@@ -16,40 +16,7 @@
# limitations under the License.
#
application.services=org.apache.hadoop.metadata.services.TitanGraphService,\
org.apache.hadoop.metadata.services.GraphBackedMetadataRepositoryService,\
org.apache.hadoop.metadata.services.DefaultMetadataService
# GraphService implementation
metadata.graph.impl.class=org.apache.hadoop.metadata.services.TitanGraphService
######### Implementation classes #########
## DO NOT MODIFY UNLESS SURE ABOUT CHANGE ##
metadata.GraphService.impl=org.apache.hadoop.metadata.services.TitanGraphService
metadata.MetadataRepositoryService.impl=org.apache.hadoop.metadata.services.GraphBackedMetadataRepositoryService
######### Implementation classes #########
######### Graph Database Configs #########
# Graph implementation
#metadata.graph.blueprints.graph=com.thinkaurelius.titan.core.TitanFactory
# Graph Storage
metadata.graph.storage.backend=berkeleyje
metadata.graph.storage.directory=target/data/berkeley
# Graph Search Index
metadata.graph.index.search.backend=elasticsearch
metadata.graph.index.search.directory=target/data/es
metadata.graph.index.search.elasticsearch.client-only=false
metadata.graph.index.search.elasticsearch.local-mode=true
######### Graph Database Configs #########
######### Security Properties #########
# SSL config
metadata.enableTLS=false
######### Security Properties #########
storage.backend=inmemory
# Graph Search Index
index.search.backend=elasticsearch
index.search.directory=target/data/es
index.search.elasticsearch.client-only=false
index.search.elasticsearch.local-mode=true
\ No newline at end of file
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is formatted as follows:
# metadata.indexer.vertex.property.name.<index>=<Property Name>
# metadata.indexer.vertex.property.type.<index>=<Data Type>
metadata.indexer.vertex.property.name.0=DESC
metadata.indexer.vertex.property.type.0=String
metadata.indexer.vertex.property.name.1=DB_LOCATION_URI
metadata.indexer.vertex.property.type.1=String
metadata.indexer.vertex.property.name.2=NAME
metadata.indexer.vertex.property.type.2=String
metadata.indexer.vertex.property.name.3=OWNER_NAME
metadata.indexer.vertex.property.type.3=String
metadata.indexer.vertex.property.name.4=TBL_NAME
metadata.indexer.vertex.property.type.4=String
metadata.indexer.vertex.property.name.5=COMMENT
metadata.indexer.vertex.property.type.5=String
metadata.indexer.vertex.property.name.6=COLUMN_NAME
metadata.indexer.vertex.property.type.6=String
metadata.indexer.vertex.property.name.7=TYPE_NAME
metadata.indexer.vertex.property.type.7=String