Commit c8aabc11 by Venkatesh Seetharam

Initial project code with maven modules and basic repository code. Contributed…

Initial project code with maven modules and basic repository code. Contributed by Venkatesh Seetharam
parent a4f51455
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Maven
target
# IntelliJ
*.iml
*.ipr
*.iws
.idea
# Eclipse
.classpath
.project
.settings
.externalToolBuilders
maven-eclipse.xml
#ActiveMQ
activemq-data
build
#log files
logs
*.log
\ No newline at end of file
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Metadata and Governance Overview
This aims to provide a simple repository for storing entities and associated
relationships among other entities.
The goal is to capture lineage for both entities and its associated instances.
It also captures provenance, lineage, classification, etc. associated with each
of the entities in the metadata repository.
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-governance</artifactId>
<version>0.1-incubating-SNAPSHOT</version>
</parent>
<artifactId>metadata-common</artifactId>
<description>Apache Metadata Common Module</description>
<name>Apache Metadata Commons</name>
<packaging>jar</packaging>
<profiles>
<profile>
<id>hadoop-2</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<classifier>tests</classifier>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<classifier>tests</classifier>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
</dependencies>
</profile>
</profiles>
<dependencies>
<dependency>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
</dependency>
<dependency>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
</dependency>
<dependency>
<groupId>net.sourceforge.findbugs</groupId>
<artifactId>annotations</artifactId>
</dependency>
<dependency>
<groupId>com.googlecode.json-simple</groupId>
<artifactId>json-simple</artifactId>
</dependency>
<dependency>
<groupId>com.tinkerpop.blueprints</groupId>
<artifactId>blueprints-core</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-core</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-berkeleyje</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.6</source>
<target>1.6</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>2.4</version>
<configuration>
<excludes>
<exclude>**/log4j.xml</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata;
/**
 * Base checked exception for the metadata subsystem. Thrown by repository
 * and service code when an operation fails in a way the caller may recover
 * from or must report.
 */
public class MetadataException extends Exception {

    /**
     * Constructs a new exception with the specified detail message. The
     * cause is not initialized, and may subsequently be initialized by
     * a call to {@link #initCause}.
     *
     * @param message the detail message, saved for later retrieval by the
     *                {@link #getMessage()} method
     */
    public MetadataException(String message) {
        super(message);
    }

    /**
     * Constructs a new exception with the specified detail message and
     * cause. <p>Note that the detail message associated with
     * {@code cause} is <i>not</i> automatically incorporated in
     * this exception's detail message.
     *
     * @param message the detail message, saved for later retrieval by the
     *                {@link #getMessage()} method
     * @param cause the cause, saved for later retrieval by the
     *              {@link #getCause()} method; a {@code null} value is
     *              permitted and indicates that the cause is nonexistent
     *              or unknown
     */
    public MetadataException(String message, Throwable cause) {
        super(message, cause);
    }

    /**
     * Constructs a new exception with the specified cause and a detail
     * message of {@code (cause == null ? null : cause.toString())} (which
     * typically contains the class and detail message of {@code cause}).
     * Useful for exceptions that are little more than wrappers for other
     * throwables.
     *
     * @param cause the cause, saved for later retrieval by the
     *              {@link #getCause()} method; a {@code null} value is
     *              permitted and indicates that the cause is nonexistent
     *              or unknown
     */
    public MetadataException(Throwable cause) {
        super(cause);
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.service;
import java.io.Closeable;
import java.io.IOException;
/**
 * Contract for a long-lived component initialized at application startup.
 *
 * Implementations are registered with {@code Services} under the value of
 * {@link #getName()} and follow a simple lifecycle: {@link #start()} once at
 * boot, {@link #stop()} (or {@link #close()}) at shutdown. Extends
 * {@link Closeable} so a service can participate in try-with-resources or
 * quiet-close utilities.
 */
public interface Service extends Closeable {
/**
 * Name of the service. Used as the unique registration key in the service
 * registry, so implementations should return a stable value.
 *
 * @return name of the service
 */
String getName();
/**
 * Starts the service. This method blocks until the service has completely started.
 *
 * @throws Exception if the service fails to start
 */
void start() throws Exception;
/**
 * Stops the service. This method blocks until the service has completely shut down.
 */
void stop();
/**
 * A version of stop() that is designed to be usable in Java7 closure
 * clauses.
 * Implementation classes MUST relay this directly to {@link #stop()}
 * @throws java.io.IOException never
 * @throws RuntimeException on any failure during the stop operation
 */
void close() throws IOException;
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.service;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Initializer used at startup to bring up all the configured Metadata services.
 */
public class ServiceInitializer {

    private static final Logger LOG = LoggerFactory.getLogger(ServiceInitializer.class);

    /** Classpath configuration file the service list is read from. */
    private static final String CONFIG_FILE = "application.properties";
    /** Property holding the comma-separated list of service class names. */
    private static final String SERVICES_PROPERTY = "application.services";

    private final Services services = Services.get();

    /**
     * Instantiates, registers and starts every service listed under the
     * {@code application.services} property of {@code application.properties}.
     * Blank entries in the list are skipped; a missing property is treated as
     * "no services configured".
     *
     * @throws MetadataException if the configuration cannot be read or any
     *                           service fails to register or start
     */
    public void initialize() throws MetadataException {
        String serviceClassNames;
        try {
            PropertiesConfiguration configuration = new PropertiesConfiguration(CONFIG_FILE);
            serviceClassNames = configuration.getString(SERVICES_PROPERTY);
        } catch (ConfigurationException e) {
            // preserve the cause so configuration failures stay diagnosable
            throw new MetadataException("unable to get server properties", e);
        }

        // Previously a hard-coded service list overwrote the configured value
        // here (debug leftover); the configuration is now authoritative.
        if (serviceClassNames == null) {
            LOG.info("No services configured under property: {}", SERVICES_PROPERTY);
            return;
        }

        for (String serviceClassName : serviceClassNames.split(",")) {
            serviceClassName = serviceClassName.trim();
            if (serviceClassName.isEmpty()) {
                continue;
            }
            Service service = ReflectionUtils.getInstanceByClassName(serviceClassName);
            services.register(service);

            LOG.info("Initializing service: {}", serviceClassName);
            try {
                service.start();
            } catch (Throwable t) {
                LOG.error("Failed to initialize service {}", serviceClassName, t);
                throw new MetadataException(t);
            }
            LOG.info("Service initialized: {}", serviceClassName);
        }
    }

    /**
     * Stops every registered service, in registration order.
     *
     * @throws MetadataException if any service fails to stop
     */
    public void destroy() throws MetadataException {
        for (Service service : services) {
            LOG.info("Destroying service: {}", service.getClass().getName());
            try {
                service.stop();
            } catch (Throwable t) {
                LOG.error("Failed to destroy service {}", service.getClass().getName(), t);
                throw new MetadataException(t);
            }
            LOG.info("Service destroyed: {}", service.getClass().getName());
        }
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.service;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.util.ReflectionUtils;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.NoSuchElementException;
/**
 * Singleton registry of services initialized at startup.
 *
 * Iteration order follows registration order (backed by a LinkedHashMap),
 * which lets callers start and stop services deterministically.
 */
public final class Services implements Iterable<Service> {

    private static final Services INSTANCE = new Services();

    /** Registered services keyed by {@link Service#getName()}, in registration order. */
    private final Map<String, Service> services =
            new LinkedHashMap<String, Service>();

    private Services() {
    }

    public static Services get() {
        return INSTANCE;
    }

    /**
     * Registers a service under its name.
     *
     * @param service the service to register
     * @throws MetadataException if a service with the same name is already registered
     */
    public synchronized void register(Service service) throws MetadataException {
        if (services.containsKey(service.getName())) {
            throw new MetadataException("Service " + service.getName() + " already registered");
        } else {
            services.put(service.getName(), service);
        }
    }

    /**
     * Looks up a registered service by name.
     *
     * @param serviceName registration key, as returned by {@link Service#getName()}
     * @return the registered service, cast to the caller's expected type
     * @throws NoSuchElementException if no service is registered under that name
     */
    @SuppressWarnings("unchecked")
    public <T extends Service> T getService(String serviceName) {
        if (services.containsKey(serviceName)) {
            return (T) services.get(serviceName);
        } else {
            throw new NoSuchElementException("Service " + serviceName + " not registered with registry");
        }
    }

    public boolean isRegistered(String serviceName) {
        return services.containsKey(serviceName);
    }

    @Override
    public Iterator<Service> iterator() {
        return services.values().iterator();
    }

    /**
     * Instantiates and registers the service whose implementation class is
     * configured under {@code <serviceName>.impl} in application.properties.
     *
     * @param serviceName logical service name
     * @return the newly created, registered service
     * @throws MetadataException if already registered, the configuration cannot
     *                           be read, or instantiation fails
     */
    public Service init(String serviceName) throws MetadataException {
        if (isRegistered(serviceName)) {
            throw new MetadataException("Service is already initialized " + serviceName);
        }

        String serviceClassName;
        try {
            PropertiesConfiguration configuration = new PropertiesConfiguration("application.properties");
            serviceClassName = configuration.getString(serviceName + ".impl");
        } catch (ConfigurationException e) {
            // preserve the cause so configuration failures stay diagnosable
            throw new MetadataException("unable to get server properties", e);
        }

        Service service = ReflectionUtils.getInstanceByClassName(serviceClassName);
        register(service);
        return service;
    }

    /** Clears all registrations; intended for tests. Does not stop the services. */
    public void reset() {
        services.clear();
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.util;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Support functions to parse and format dates in the xsd dateTime string form.
 * All operations work in UTC; the accepted format is {@code yyyy-MM-dd'T'HH:mm'Z'}.
 */
public final class DateTimeHelper {

    // Years 1900-2999, two-digit month 01-12, two-digit day 01-31,
    // 24-hour time, literal trailing 'Z'. Calendar rules (month lengths,
    // leap years) are enforced separately in validate().
    private static final String DATE_PATTERN =
            "(2\\d\\d\\d|19\\d\\d)-(0[1-9]|1[012])-(0[1-9]|1[0-9]|2[0-9]|3[01])T([0-1][0-9]|2[0-3]):([0-5][0-9])Z";
    private static final Pattern PATTERN = Pattern.compile(DATE_PATTERN);

    public static final String ISO8601_FORMAT = "yyyy-MM-dd'T'HH:mm'Z'";

    private DateTimeHelper() {}

    public static String getTimeZoneId(TimeZone tz) {
        return tz.getID();
    }

    /**
     * Returns a fresh UTC formatter for {@link #ISO8601_FORMAT}.
     * A new instance is created per call because SimpleDateFormat is not thread-safe.
     */
    public static DateFormat getDateFormat() {
        DateFormat dateFormat = new SimpleDateFormat(ISO8601_FORMAT);
        dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
        return dateFormat;
    }

    /**
     * Formats a date in UTC ISO8601 form, or returns null for a null date.
     */
    public static String formatDateUTC(Date date) {
        return (date != null) ? getDateFormat().format(date) : null;
    }

    /**
     * Parses a UTC ISO8601 date string.
     *
     * @throws IllegalArgumentException if the string fails {@link #validate(String)}
     */
    public static Date parseDateUTC(String dateStr) {
        if (!validate(dateStr)) {
            throw new IllegalArgumentException(dateStr + " is not a valid UTC string");
        }
        try {
            return getDateFormat().parse(dateStr);
        } catch (ParseException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Re-formats a date string given in {@code dateStringFormat} into ISO8601 UTC.
     */
    public static String formatDateUTCToISO8601(final String dateString, final String dateStringFormat) {
        try {
            // NOTE(review): truncating the pattern to the input's length assumes
            // the pattern contains no quoted literals or multi-char fields before
            // that point — TODO confirm against callers.
            DateFormat dateFormat = new SimpleDateFormat(dateStringFormat.substring(0, dateString.length()));
            dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
            return DateTimeHelper.formatDateUTC(dateFormat.parse(dateString));
        } catch (ParseException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Validates a date string against the pattern and calendar rules
     * (month lengths and Gregorian leap years).
     *
     * @param date date string for validation
     * @return true for a valid date, false otherwise
     */
    public static boolean validate(final String date) {
        Matcher matcher = PATTERN.matcher(date);
        if (!matcher.matches()) {
            return false;
        }

        // The pattern guarantees two-digit month and day groups, so only the
        // zero-padded forms need checking.
        int year = Integer.parseInt(matcher.group(1));
        String month = matcher.group(2);
        String day = matcher.group(3);

        if (day.equals("31")
                && (month.equals("04") || month.equals("06")
                || month.equals("09") || month.equals("11"))) {
            return false; // only months 1,3,5,7,8,10,12 have 31 days
        }

        if (month.equals("02")) {
            // Gregorian leap year: divisible by 4, except century years
            // not divisible by 400 (1900 and 2100 are NOT leap years).
            boolean leapYear = (year % 4 == 0 && year % 100 != 0) || (year % 400 == 0);
            if (leapYear) {
                return !(day.equals("30") || day.equals("31"));
            }
            return !(day.equals("29") || day.equals("30") || day.equals("31"));
        }

        return true;
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.util;
import org.apache.hadoop.metadata.MetadataException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
/**
 * Helper methods for instantiating classes through reflection.
 */
public final class ReflectionUtils {

    private ReflectionUtils() {}

    /**
     * Creates an instance of the named class via its public no-arg
     * constructor; if that constructor is not accessible, falls back to a
     * static singleton accessor named {@code get} on the class.
     *
     * @param clazzName fully-qualified class name
     * @param <T> expected instance type
     * @return a new (or singleton) instance of the class
     * @throws MetadataException if the class cannot be loaded or instantiated
     */
    @SuppressWarnings("unchecked")
    public static <T> T getInstanceByClassName(String clazzName) throws MetadataException {
        final ClassLoader loader = ReflectionUtils.class.getClassLoader();
        try {
            Class<T> clazz = (Class<T>) loader.loadClass(clazzName);
            try {
                return clazz.newInstance();
            } catch (IllegalAccessException e) {
                // No accessible no-arg constructor: assume a singleton with
                // a static get() accessor.
                Method singletonAccessor = clazz.getMethod("get");
                return (T) singletonAccessor.invoke(null);
            }
        } catch (Exception e) {
            throw new MetadataException("Unable to get instance for " + clazzName, e);
        }
    }

    /**
     * Invokes constructor with one argument.
     * @param clazzName - classname
     * @param argCls - Class of the argument
     * @param arg - constructor argument
     * @param <T> - instance type
     * @return Class instance
     * @throws MetadataException if the class cannot be loaded or constructed
     */
    @SuppressWarnings("unchecked")
    public static <T> T getInstanceByClassName(String clazzName, Class<?> argCls,
                                               Object arg) throws MetadataException {
        final ClassLoader loader = ReflectionUtils.class.getClassLoader();
        try {
            Class<T> clazz = (Class<T>) loader.loadClass(clazzName);
            Constructor<T> oneArgCtor = clazz.getConstructor(argCls);
            return oneArgCtor.newInstance(arg);
        } catch (Exception e) {
            throw new MetadataException("Unable to get instance for " + clazzName, e);
        }
    }
}
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
<appender name="console" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
</layout>
</appender>
<appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/application.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
</layout>
</appender>
<appender name="AUDIT" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/audit.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %x %m%n"/>
</layout>
</appender>
<logger name="org.apache.hadoop.metadata" additivity="false">
<level value="debug"/>
<appender-ref ref="FILE"/>
</logger>
<logger name="AUDIT">
<level value="info"/>
<appender-ref ref="AUDIT"/>
</logger>
<root>
<priority value="info"/>
<appender-ref ref="console"/>
</root>
</log4j:configuration>
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-governance</artifactId>
<version>0.1-incubating-SNAPSHOT</version>
</parent>
<artifactId>metadata-docs</artifactId>
<description>Apache Metadata Documentation</description>
<name>Apache Metadata Documentation</name>
<properties>
<skipTests>true</skipTests>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.maven.doxia</groupId>
<artifactId>doxia-module-twiki</artifactId>
<version>1.3</version>
</dependency>
</dependencies>
<executions>
<execution>
<goals>
<goal>site</goal>
</goals>
<phase>prepare-package</phase>
</execution>
</executions>
<configuration>
<generateProjectInfo>false</generateProjectInfo>
<generateReports>false</generateReports>
<skip>false</skip>
</configuration>
</plugin>
</plugins>
</build>
</project>
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project name="Metadata and Governance" xmlns="http://maven.apache.org/DECORATION/1.3.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/DECORATION/1.3.0 http://maven.apache.org/xsd/decoration-1.3.0.xsd">
<skin>
<groupId>org.apache.maven.skins</groupId>
<artifactId>maven-fluido-skin</artifactId>
<version>1.3.0</version>
</skin>
<custom>
<fluidoSkin>
<project>Apache Metadata and Governance</project>
<sideBarEnabled>false</sideBarEnabled>
</fluidoSkin>
</custom>
<bannerLeft>
<name>DGC - Metadata</name>
<src>./images/metadata-logo.png</src>
<width>200px</width>
<height>45px</height>
</bannerLeft>
<bannerRight>
<name>Apache Incubator</name>
<src>./images/apache-incubator-logo.png</src>
<href>http://incubator.apache.org</href>
</bannerRight>
<publishDate position="right"/>
<version position="right"/>
<body>
<head>
<script type="text/javascript">
$( document ).ready( function() { $( '.carousel' ).carousel( { interval: 3500 } ) } );
</script>
</head>
<breadcrumbs position="left">
<item name="MetadataGovernance" title="Apache Metadata and Governance" href="index.html"/>
</breadcrumbs>
<footer>
© 2011-2012 The Apache Software Foundation. Apache Metadata and Governance, Apache,
the Apache feather logo, and the Apache Metadata and Governance project logo are
trademarks of The Apache Software Foundation.
</footer>
</body>
</project>
\ No newline at end of file
---+ Data Governance and Metadata platform for Hadoop
---++ Why?
*
* Captures Lineage information for data sets and processes
---+ Getting Started
#LicenseInfo
---+ Licensing Information
Metadata (DGC) is distributed under [[http://www.apache.org/licenses/LICENSE-2.0][Apache License 2.0]].
This diff is collapsed. Click to expand it.
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-governance</artifactId>
<version>0.1-incubating-SNAPSHOT</version>
</parent>
<artifactId>metadata-repository</artifactId>
<description>Apache Metadata Repository Module</description>
<name>Apache Metadata Repository</name>
<packaging>jar</packaging>
<profiles>
<profile>
<id>hadoop-2</id>
<activation>
<activeByDefault>true</activeByDefault>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<classifier>tests</classifier>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<classifier>tests</classifier>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
</dependencies>
</profile>
</profiles>
<dependencies>
<dependency>
<groupId>org.apache.hadoop.metadata</groupId>
<artifactId>metadata-common</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</dependency>
<dependency>
<groupId>com.googlecode.json-simple</groupId>
<artifactId>json-simple</artifactId>
</dependency>
<dependency>
<groupId>com.tinkerpop.blueprints</groupId>
<artifactId>blueprints-core</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-core</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-berkeleyje</artifactId>
</dependency>
<dependency>
<groupId>com.thinkaurelius.titan</groupId>
<artifactId>titan-es</artifactId>
</dependency>
<dependency>
<groupId>org.testng</groupId>
<artifactId>testng</artifactId>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.7</source>
<target>1.7</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<version>2.4</version>
<configuration>
<excludes>
<exclude>**/log4j.xml</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
</project>
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Created by IntelliJ IDEA.
* User: seetharam
* Date: 12/1/14
* Time: 2:21 PM
*/
package org.apache.hadoop.metadata;
/**
* Guice module for Repository module.
*/
public class RepositoryMetadataModule extends com.google.inject.AbstractModule {
protected void configure() {
// add configuration logic here
}
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.services;
import com.google.common.base.Preconditions;
import com.thinkaurelius.titan.core.TitanGraph;
import com.tinkerpop.blueprints.GraphQuery;
import com.tinkerpop.blueprints.Vertex;
import org.apache.hadoop.metadata.service.Services;
import org.json.simple.JSONValue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * An implementation backed by Titan Graph DB.
 */
public class GraphBackedMetadataRepositoryService implements MetadataRepositoryService {

    private static final Logger LOG =
            LoggerFactory.getLogger(GraphBackedMetadataRepositoryService.class);

    /** Name under which this service is registered/looked up. */
    public static final String NAME = GraphBackedMetadataRepositoryService.class.getSimpleName();

    /** Underlying graph service; resolved in {@link #start()}. */
    private GraphService graphService;

    /**
     * Name of the service.
     *
     * @return name of the service
     */
    @Override
    public String getName() {
        return NAME;
    }

    /**
     * Starts the service. This method blocks until the service has completely started.
     *
     * @throws Exception if the backing graph service has not been initialized
     */
    @Override
    public void start() throws Exception {
        graphService = Services.get().getService(TitanGraphService.NAME);
        if (graphService == null) {
            throw new RuntimeException("graph service is not initialized");
        }
    }

    /**
     * Stops the service. This method blocks until the service has completely shut down.
     */
    @Override
    public void stop() {
        // do nothing
    }

    /**
     * A version of stop() that is designed to be usable in Java7 closure
     * clauses.
     * Implementation classes MUST relay this directly to {@link #stop()}
     *
     * @throws java.io.IOException never
     * @throws RuntimeException on any failure during the stop operation
     */
    @Override
    public void close() throws IOException {
        stop();
    }

    private TitanGraph getGraph() {
        return ((TitanGraphService) graphService).getTitanGraph();
    }

    /**
     * Stores the given entity as a vertex keyed by a freshly generated GUID.
     *
     * @param entity JSON object holding the entity properties; must contain "entityName"
     * @param entityType type of the entity, stored as the "entityType" property
     * @return the GUID generated for the stored entity
     */
    @Override
    public String submitEntity(String entity, String entityType) {
        @SuppressWarnings("unchecked") // json-simple returns an untyped map for JSON objects
        Map<String, String> properties = (Map<String, String>) JSONValue.parse(entity);
        Preconditions.checkNotNull(properties, "entity cannot be parsed as a JSON object");

        final String entityName = properties.get("entityName");
        Preconditions.checkNotNull(entityName, "entity name cannot be null");

        // todo check if this is a duplicate

        final String guid = UUID.randomUUID().toString();
        try {
            getGraph().newTransaction();
            Vertex entityVertex = getGraph().addVertex(null);
            entityVertex.setProperty("guid", guid);
            entityVertex.setProperty("entityName", entityName);
            entityVertex.setProperty("entityType", entityType);
            for (Map.Entry<String, String> entry : properties.entrySet()) {
                entityVertex.setProperty(entry.getKey(), entry.getValue());
            }
            // commit only on success; the previous code committed in a finally
            // block, which would persist a half-written vertex after a failure
            getGraph().commit();
        } catch (RuntimeException e) {
            getGraph().rollback();
            throw e;
        }

        return guid;
    }

    /**
     * Returns the entity definition as a JSON document, or null when no
     * vertex matches the given name and type.
     */
    @Override
    public String getEntityDefinition(String entityName, String entityType) {
        Vertex entityVertex = findVertex(entityName, entityType);
        if (entityVertex == null) {
            return null;
        }

        Map<String, String> properties = extractProperties(entityVertex);
        return JSONValue.toJSONString(properties);
    }

    /**
     * Looks up the vertex carrying the given entityName/entityType properties.
     *
     * @return the first matching vertex, or null if none exists
     */
    protected Vertex findVertex(String entityName, String entityType) {
        LOG.debug("Finding vertex for: name={}, type={}", entityName, entityType);

        GraphQuery query = getGraph().query()
                .has("entityName", entityName)
                .has("entityType", entityType);
        Iterator<Vertex> results = query.vertices().iterator();
        return results.hasNext() ? results.next() : null; // returning one since name/type is unique
    }

    /** Copies all vertex properties into a plain string map. */
    private Map<String, String> extractProperties(Vertex entityVertex) {
        Map<String, String> properties = new HashMap<>();
        for (String key : entityVertex.getPropertyKeys()) {
            properties.put(key, String.valueOf(entityVertex.getProperty(key)));
        }

        return properties;
    }

    /**
     * Listing is not implemented yet.
     *
     * @return an empty list (never null), so callers can iterate safely
     */
    @Override
    public List<String> getEntityList(String entityType) {
        // not implemented yet; return an empty list rather than null
        return java.util.Collections.emptyList();
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.services;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.KeyIndexableGraph;
import com.tinkerpop.blueprints.TransactionalGraph;
import org.apache.hadoop.metadata.service.Service;
import java.util.Set;
/**
 * A blueprints based graph service.
 *
 * Exposes the underlying graph through progressively more specific views
 * (plain, key-indexable, transactional) along with the sets of property
 * keys that are indexed for vertices and edges.
 */
public interface GraphService extends Service {

    /**
     * Returns a handle to the graph db.
     *
     * @return a handle to the graph db
     */
    Graph getGraph();

    /**
     * Returns the graph viewed as a {@link KeyIndexableGraph},
     * used for inspecting the indexed keys.
     *
     * @return the key-indexable view of the graph
     */
    KeyIndexableGraph getIndexableGraph();

    /**
     * Returns the graph viewed as a {@link TransactionalGraph},
     * used for commit/rollback control.
     *
     * @return the transactional view of the graph
     */
    TransactionalGraph getTransactionalGraph();

    /**
     * Returns the property keys that are indexed for vertices.
     *
     * @return set of vertex indexed keys
     */
    Set<String> getVertexIndexedKeys();

    /**
     * Returns the property keys that are indexed for edges.
     *
     * @return set of edge indexed keys
     */
    Set<String> getEdgeIndexedKeys();
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.services;
import org.apache.hadoop.metadata.service.Service;
import java.util.List;
/**
 * An interface for persisting metadata into a blueprints enabled graph db.
 */
public interface MetadataRepositoryService extends Service {

    /**
     * Persists the given entity definition into the repository.
     *
     * @param entity the entity definition (a JSON document in the default implementation)
     * @param entityType type of the entity
     * @return a unique id (GUID) assigned to the stored entity
     */
    String submitEntity(String entity, String entityType);

    /**
     * Retrieves the definition of a previously submitted entity.
     *
     * @param entityName name of the entity
     * @param entityType type of the entity
     * @return the entity definition, or null if no such entity exists
     */
    String getEntityDefinition(String entityName, String entityType);

    /**
     * Lists entities of the given type.
     *
     * @param entityType type of the entities to list
     * @return list of entities; NOTE(review): the default implementation does
     *         not implement this yet — confirm the intended contract
     */
    List<String> getEntityList(String entityType);
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.services;
import com.thinkaurelius.titan.core.Cardinality;
import com.thinkaurelius.titan.core.PropertyKey;
import com.thinkaurelius.titan.core.TitanFactory;
import com.thinkaurelius.titan.core.TitanGraph;
import com.thinkaurelius.titan.core.schema.TitanManagement;
import com.thinkaurelius.titan.graphdb.blueprints.TitanBlueprintsGraph;
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Element;
import com.tinkerpop.blueprints.Graph;
import com.tinkerpop.blueprints.KeyIndexableGraph;
import com.tinkerpop.blueprints.TransactionalGraph;
import com.tinkerpop.blueprints.Vertex;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Iterator;
import java.util.Set;
/**
 * Default implementation for Graph service backed by Titan.
 */
public class TitanGraphService implements GraphService {

    private static final Logger LOG = LoggerFactory.getLogger(TitanGraphService.class);

    /** Name under which this service is registered/looked up. */
    public static final String NAME = TitanGraphService.class.getSimpleName();

    /**
     * Constant for the configuration property that indicates the prefix.
     */
    private static final String METADATA_PREFIX = "metadata.graph.";
    private static final String METADATA_INDEX_KEY = "index.name";

    private Configuration graphConfig;
    private Graph graph;
    private Set<String> vertexIndexedKeys;
    private Set<String> edgeIndexedKeys;

    /**
     * Name of the service.
     *
     * @return name of the service
     */
    @Override
    public String getName() {
        return NAME;
    }

    /**
     * Starts the service. This method blocks until the service has completely started.
     *
     * @throws Exception on configuration or graph initialization failure
     */
    @Override
    public void start() throws Exception {
        graphConfig = getConfiguration();

        graph = initializeGraphDB();
        // createIndicesForVertexKeys();
        // todo - create Edge Cardinality Constraints
        LOG.info("Initialized graph db: {}", graph);

        vertexIndexedKeys = getIndexableGraph().getIndexedKeys(Vertex.class);
        LOG.info("Init vertex property keys: {}", vertexIndexedKeys);

        edgeIndexedKeys = getIndexableGraph().getIndexedKeys(Edge.class);
        LOG.info("Init edge property keys: {}", edgeIndexedKeys);
    }

    /**
     * Creates and opens the underlying Titan graph.
     *
     * NOTE(review): this ignores the metadata.graph.* settings loaded in
     * {@link #start()} and hard-codes an embedded BerkeleyDB backend under
     * target/data — confirm whether the external configuration should be used.
     *
     * @return the opened graph
     */
    protected Graph initializeGraphDB() {
        LOG.info("Initializing graph db");

        // return GraphFactory.open(graphConfig);

        // named backendConfig so it does not shadow the graphConfig field
        Configuration backendConfig = new PropertiesConfiguration();
        backendConfig.setProperty("storage.backend", "berkeleyje");
        backendConfig.setProperty("storage.directory", "target/data/graphdb");
        return TitanFactory.open(backendConfig);
    }

    /**
     * Loads application.properties and extracts the graph settings, i.e. all
     * keys starting with {@value #METADATA_PREFIX}, with the prefix stripped.
     *
     * @return the graph-specific configuration
     * @throws ConfigurationException if application.properties cannot be loaded
     */
    private static Configuration getConfiguration() throws ConfigurationException {
        PropertiesConfiguration configProperties =
                new PropertiesConfiguration("application.properties");

        Configuration graphConfig = new PropertiesConfiguration();
        final Iterator<String> iterator = configProperties.getKeys();
        while (iterator.hasNext()) {
            String key = iterator.next();
            if (key.startsWith(METADATA_PREFIX)) {
                String value = (String) configProperties.getProperty(key);
                key = key.substring(METADATA_PREFIX.length());
                // use the logger instead of System.out for diagnostics
                LOG.debug("Configuring graph with key = {}, value = {}", key, value);
                graphConfig.setProperty(key, value);
            }
        }

        return graphConfig;
    }

    /**
     * This unfortunately requires a handle to Titan implementation since
     * com.tinkerpop.blueprints.KeyIndexableGraph#createKeyIndex does not create an index.
     */
    protected void createIndicesForVertexKeys() {
        if (!((KeyIndexableGraph) graph).getIndexedKeys(Vertex.class).isEmpty()) {
            LOG.info("Indexes already exist for graph");
            return;
        }

        LOG.info("Indexes do not exist, creating indexes for graph");
        // todo - externalize this
        String indexName = graphConfig.getString(METADATA_INDEX_KEY);

        // Use a single management transaction for the property key and its
        // index, and commit it explicitly — the previous code opened a new
        // management system per call and never committed it, so the schema
        // changes were silently discarded.
        TitanManagement managementSystem = getTitanGraph().getManagementSystem();
        try {
            PropertyKey guid =
                    createPropertyKey(managementSystem, "guid", String.class, Cardinality.SINGLE);
            createIndex(managementSystem, indexName, guid, Vertex.class, true);
            managementSystem.commit();
        } catch (RuntimeException e) {
            managementSystem.rollback();
            throw e;
        }
        getTitanGraph().commit();
    }

    /** Creates a property key within the given management transaction. */
    private PropertyKey createPropertyKey(TitanManagement managementSystem,
                                          String propertyKeyName, Class<?> dataType,
                                          Cardinality cardinality) {
        PropertyKey propertyKey = managementSystem
                .makePropertyKey(propertyKeyName)
                .dataType(dataType)
                .cardinality(cardinality)
                .make();
        LOG.info("Created property key {}", propertyKey);
        return propertyKey;
    }

    /** Builds a (optionally unique) composite index over the given property key. */
    private void createIndex(TitanManagement managementSystem, String indexName,
                             PropertyKey propertyKey, Class<? extends Element> clazz,
                             boolean isUnique) {
        managementSystem.buildPropertyIndex(propertyKey, indexName);
        TitanManagement.IndexBuilder indexBuilder = managementSystem
                .buildIndex(indexName, clazz)
                .addKey(propertyKey);
        if (isUnique) {
            indexBuilder.unique();
        }
        indexBuilder.buildCompositeIndex();
    }

    /**
     * Stops the service. This method blocks until the service has completely shut down.
     */
    @Override
    public void stop() {
        // do nothing
    }

    /**
     * A version of stop() that is designed to be usable in Java7 closure
     * clauses.
     * Implementation classes MUST relay this directly to {@link #stop()}
     *
     * @throws java.io.IOException never
     * @throws RuntimeException on any failure during the stop operation
     */
    @Override
    public void close() throws IOException {
        stop();
    }

    @Override
    public Graph getGraph() {
        return graph;
    }

    @Override
    public KeyIndexableGraph getIndexableGraph() {
        return (KeyIndexableGraph) graph;
    }

    @Override
    public TransactionalGraph getTransactionalGraph() {
        return (TransactionalGraph) graph;
    }

    protected TitanBlueprintsGraph getTitanBlueprintsGraph() {
        return (TitanBlueprintsGraph) graph;
    }

    public TitanGraph getTitanGraph() {
        return (TitanGraph) graph;
    }

    @Override
    public Set<String> getVertexIndexedKeys() {
        return vertexIndexedKeys;
    }

    @Override
    public Set<String> getEdgeIndexedKeys() {
        return edgeIndexedKeys;
    }
}
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
<appender name="console" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
</layout>
</appender>
<appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/application.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
</layout>
</appender>
<appender name="AUDIT" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/audit.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %x %m%n"/>
</layout>
</appender>
<appender name="METRIC" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/metric.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %m%n"/>
</layout>
</appender>
    <!-- Route the dedicated "AUDIT" logger (used by AuditFilter) to the audit
         log file; without this the audit records fall through to the root
         console appender instead of the AUDIT appender defined above. -->
    <logger name="AUDIT" additivity="false">
        <level value="info"/>
        <appender-ref ref="AUDIT"/>
    </logger>

    <logger name="org.apache.hadoop.metadata" additivity="false">
        <level value="debug"/>
        <appender-ref ref="FILE"/>
    </logger>
<root>
<priority value="info"/>
<appender-ref ref="console"/>
</root>
</log4j:configuration>
This diff is collapsed. Click to expand it.
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.metadata.web.service.EmbeddedServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Driver for running Metadata as a standalone server with embedded jetty server.
 */
public final class Main {
    private static final Logger LOG = LoggerFactory.getLogger(Main.class);

    /** Command line option for the web application path. */
    private static final String APP_PATH = "app";
    /** Command line option for the HTTP(S) port. */
    private static final String APP_PORT = "port";

    /**
     * Prevent users from constructing this.
     */
    private Main() {
    }

    /** Parses the optional -app and -port command line options. */
    private static CommandLine parseArgs(String[] args) throws ParseException {
        Options options = new Options();

        Option opt = new Option(APP_PATH, true, "Application Path");
        opt.setRequired(false);
        options.addOption(opt);

        opt = new Option(APP_PORT, true, "Application Port");
        opt.setRequired(false);
        options.addOption(opt);

        return new GnuParser().parse(options, args);
    }

    /**
     * Starts the embedded Jetty server hosting the metadata web application.
     *
     * @param args optional -app (web app path) and -port overrides
     * @throws Exception on configuration or server startup failure
     */
    public static void main(String[] args) throws Exception {
        CommandLine cmd = parseArgs(args);

        // previously computed but unused; log it so the build version is visible
        String projectVersion = getProjectVersion();
        LOG.info("Metadata project version: {}", projectVersion);

        // todo: derive the app path from the project version instead of hard-coding it
        // String appPath = "webapp/target/metadata-webapp-" + projectVersion;
        String appPath = "webapp/target/metadata-governance";
        if (cmd.hasOption(APP_PATH)) {
            appPath = cmd.getOptionValue(APP_PATH);
        }

        PropertiesConfiguration configuration =
                new PropertiesConfiguration("application.properties");
        final String enableTLSFlag = configuration.getString("metadata.enableTLS");
        final int appPort = getApplicationPort(cmd, enableTLSFlag);
        final boolean enableTLS = isTLSEnabled(enableTLSFlag, appPort);
        // propagate the resolved TLS setting so downstream readers see the effective value
        configuration.setProperty("metadata.enableTLS", String.valueOf(enableTLS));

        LOG.info(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>");
        LOG.info("Server starting with TLS ? {} on port {}", enableTLS, appPort);
        LOG.info("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<");

        EmbeddedServer server = EmbeddedServer.newServer(appPort, appPath, enableTLS);
        server.start();
    }

    /** Reads the project version from the build-generated properties file. */
    private static String getProjectVersion() throws ConfigurationException {
        PropertiesConfiguration configuration =
                new PropertiesConfiguration("metadata-buildinfo.properties");
        return configuration.getString("project.version");
    }

    /**
     * Resolves the port: the -port option if given, otherwise 15443 when TLS
     * is enabled (or unspecified, since TLS defaults to on) and 15000 otherwise.
     */
    private static int getApplicationPort(CommandLine cmd, String enableTLSFlag) {
        final int appPort;
        if (cmd.hasOption(APP_PORT)) {
            // parseInt avoids the needless boxing of Integer.valueOf
            appPort = Integer.parseInt(cmd.getOptionValue(APP_PORT));
        } else {
            // default : metadata.enableTLS is true
            appPort = StringUtils.isEmpty(enableTLSFlag)
                    || enableTLSFlag.equals("true") ? 15443 : 15000;
        }

        return appPort;
    }

    /**
     * Resolves the effective TLS flag: the configured value wins; otherwise the
     * metadata.enableTLS system property, defaulting to on for *443 ports.
     */
    private static boolean isTLSEnabled(String enableTLSFlag, int appPort) {
        return Boolean.parseBoolean(StringUtils.isEmpty(enableTLSFlag)
                ? System.getProperty("metadata.enableTLS", (appPort % 1000) == 443 ? "true" : "false")
                : enableTLSFlag);
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.errors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import java.util.concurrent.ThreadLocalRandom;
/**
 * Exception mapper for Jersey.
 *
 * WebApplicationExceptions pass through with their own response; anything else
 * is logged under a random correlation id and mapped to a generic 500 whose
 * body carries that id, so users can report it without seeing internals.
 *
 * @param <E> the exception type handled by this mapper
 */
public class LoggingExceptionMapper<E extends Throwable> implements ExceptionMapper<E> {

    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingExceptionMapper.class);

    @Override
    public Response toResponse(E exception) {
        // JAX-RS exceptions already know their response; hand it back untouched
        if (exception instanceof WebApplicationException) {
            WebApplicationException webAppException = (WebApplicationException) exception;
            return webAppException.getResponse();
        }

        // correlation id ties the client-visible message to the server log entry
        final long errorId = ThreadLocalRandom.current().nextLong();
        logException(errorId, exception);

        final String message = formatErrorMessage(errorId, exception);
        return Response.serverError()
                .entity(message)
                .build();
    }

    /** Client-facing message; deliberately omits exception details. */
    @SuppressWarnings("UnusedParameters")
    protected String formatErrorMessage(long id, E exception) {
        return String.format("There was an error processing your request. It has been logged (ID %016x).", id);
    }

    /** Logs the full stack trace under the correlation id. */
    protected void logException(long id, E exception) {
        final String logMessage = formatLogMessage(id, exception);
        LOGGER.error(logMessage, exception);
    }

    @SuppressWarnings("UnusedParameters")
    protected String formatLogMessage(long id, Throwable exception) {
        return String.format("Error handling a request: %016x", id);
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.filters;
import org.apache.hadoop.metadata.util.DateTimeHelper;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.util.Date;
import java.util.UUID;
/**
 * This records audit information as part of the filter after processing the request
 * and also introduces a UUID into request and response for tracing requests in logs.
 */
public class AuditFilter implements Filter {

    private static final Logger AUDIT_LOG = LoggerFactory.getLogger("AUDIT");
    private static final Logger LOG = LoggerFactory.getLogger(AuditFilter.class);

    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
        LOG.info("AuditFilter initialization started");
    }

    @Override
    public void doFilter(ServletRequest request,
                         ServletResponse response,
                         FilterChain filterChain) throws IOException, ServletException {
        final String requestTimestamp = DateTimeHelper.formatDateUTC(new Date());
        final HttpServletRequest httpRequest = (HttpServletRequest) request;
        final String requestId = UUID.randomUUID().toString();

        final Thread worker = Thread.currentThread();
        final String originalThreadName = worker.getName();
        try {
            // tag the worker thread with the request id so every log line
            // emitted while handling this request can be traced back to it
            worker.setName(formatName(originalThreadName, requestId));
            filterChain.doFilter(request, response);
        } finally {
            recordAudit(httpRequest, requestTimestamp);
            // put the request id into the response so users can trace logs for this request
            ((HttpServletResponse) response).setHeader(Servlets.REQUEST_ID, requestId);
            worker.setName(originalThreadName);
        }
    }

    private String formatName(String threadName, String requestId) {
        return threadName + " - " + requestId;
    }

    private void recordAudit(HttpServletRequest httpRequest, String whenUTC) {
        final String who = getUserFromRequest(httpRequest);
        final String fromAddress = httpRequest.getRemoteAddr();
        final String fromHost = httpRequest.getRemoteHost();
        final String whatURL = Servlets.getRequestURL(httpRequest);
        final String whatAddrs = httpRequest.getLocalAddr();

        LOG.debug("Audit: {}/{} performed request {} ({}) at time {}",
                who, fromAddress, whatURL, whatAddrs, whenUTC);
        audit(who, fromAddress, fromHost, whatURL, whatAddrs, whenUTC);
    }

    private String getUserFromRequest(HttpServletRequest httpRequest) {
        // look for the user in the request; fall back to a marker when absent
        final String user = Servlets.getUserFromRequest(httpRequest);
        return user == null ? "UNKNOWN" : user;
    }

    private void audit(String who, String fromAddress, String fromHost, String whatURL,
                       String whatAddrs, String whenUTC) {
        AUDIT_LOG.info("Audit: {}/{}-{} performed request {} ({}) at time {}",
                who, fromAddress, fromHost, whatURL, whatAddrs, whenUTC);
    }

    @Override
    public void destroy() {
        // do nothing
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.filters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import java.io.IOException;
/**
 * This enforces authentication as part of the filter before processing the request.
 * todo: Subclass of {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter}.
 */
public class AuthenticationFilter implements Filter {

    private static final Logger LOG = LoggerFactory.getLogger(AuthenticationFilter.class);

    /**
     * Initializes the filter. No configuration is read yet; only logs startup.
     *
     * @param filterConfig filter configuration supplied by the container
     * @throws ServletException never thrown by this implementation
     */
    @Override
    public void init(FilterConfig filterConfig) throws ServletException {
        LOG.info("AuthenticationFilter initialization started");
    }

    /**
     * Passes the request straight through to the next filter in the chain.
     * Authentication is not implemented yet (see the class-level todo), so
     * this is currently a no-op pass-through.
     */
    @Override
    public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
            throws IOException, ServletException {
        chain.doFilter(request, response);
    }

    /** Called on shutdown; there is nothing to clean up. */
    @Override
    public void destroy() {
        // do nothing
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.listeners;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.service.ServiceInitializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
/**
 * Listener for bootstrapping Services and configuration properties.
 */
public class ApplicationStartupListener implements ServletContextListener {

    private static final Logger LOG = LoggerFactory.getLogger(ApplicationStartupListener.class);

    private final ServiceInitializer startupServices = new ServiceInitializer();

    /**
     * Initializes all registered services when the web application starts.
     *
     * @param sce servlet context event (unused)
     * @throws RuntimeException if service initialization fails
     */
    @Override
    public void contextInitialized(ServletContextEvent sce) {
        try {
            startupServices.initialize();
            showStartupInfo();
        } catch (MetadataException e) {
            throw new RuntimeException("Error starting services", e);
        }
    }

    /** Logs the startup banner along with the application configuration. */
    private void showStartupInfo() {
        StringBuilder buffer = banner(" Metadata Server (STARTED) ");
        try {
            PropertiesConfiguration configuration =
                    new PropertiesConfiguration("application.properties");
            buffer.append(configuration.toString());
        } catch (ConfigurationException e) {
            buffer.append("*** Unable to get build info ***").append(e.getMessage());
        }
        LOG.info(buffer.toString());
    }

    /** Builds the boxed banner used by both the startup and shutdown messages. */
    private static StringBuilder banner(String title) {
        StringBuilder buffer = new StringBuilder();
        buffer.append("\n############################################");
        buffer.append("\n").append(title);
        buffer.append("\n############################################");
        return buffer;
    }

    /**
     * Destroys all registered services on shutdown; failures are logged but
     * not rethrown so shutdown can proceed.
     *
     * @param sce servlet context event (unused)
     */
    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        try {
            startupServices.destroy();
        } catch (MetadataException e) {
            LOG.warn("Error destroying services", e);
        }

        LOG.info(banner(" Metadata Server (SHUTDOWN) ").toString());
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.params;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * An abstract base class from which to build Jersey parameter classes.
 *
 * Subclasses implement {@link #parse(String)}; a parse failure is converted into a
 * {@link WebApplicationException} carrying the response built by {@link #error(String, Exception)}.
 *
 * @param <T> the type of value wrapped by the parameter
 */
public abstract class AbstractParam<T> {

    // May be null if a subclass's parse() returns null; all Object-contract
    // methods below are therefore null-safe.
    private final T value;

    /**
     * Given an input value from a client, creates a parameter wrapping its parsed value.
     *
     * @param input an input value from a client request
     * @throws WebApplicationException if {@code input} cannot be parsed; carries the
     *         response produced by {@link #error(String, Exception)} (400 by default)
     */
    @SuppressWarnings({"AbstractMethodCallInConstructor", "OverriddenMethodCallDuringObjectConstruction"})
    protected AbstractParam(String input) {
        try {
            this.value = parse(input);
        } catch (Exception e) {
            throw new WebApplicationException(error(input, e));
        }
    }

    /**
     * Given a string representation which was unable to be parsed and the exception thrown, produce
     * a {@link javax.ws.rs.core.Response} to be sent to the client.
     *
     * By default, generates a {@code 400 Bad Request} with a plain text entity generated by
     * {@link #errorMessage(String, Exception)}.
     *
     * @param input the raw input value
     * @param e the exception thrown while parsing {@code input}
     * @return the {@link javax.ws.rs.core.Response} to be sent to the client
     */
    protected Response error(String input, Exception e) {
        return Response.status(getErrorStatus())
                .entity(errorMessage(input, e))
                .type(mediaType())
                .build();
    }

    /**
     * Returns the media type of the error message entity.
     *
     * @return the media type of the error message entity
     */
    protected MediaType mediaType() {
        return MediaType.TEXT_PLAIN_TYPE;
    }

    /**
     * Given a string representation which was unable to be parsed and the exception thrown, produce
     * an entity to be sent to the client.
     *
     * @param input the raw input value
     * @param e the exception thrown while parsing {@code input}
     * @return the error message to be sent the client
     */
    protected String errorMessage(String input, Exception e) {
        return String.format("Invalid parameter: %s (%s)", input, e.getMessage());
    }

    /**
     * Given a string representation which was unable to be parsed, produce a
     * {@link javax.ws.rs.core.Response.Status} for the {@link Response} to be sent to the client.
     *
     * @return the HTTP {@link javax.ws.rs.core.Response.Status} of the error message
     */
    @SuppressWarnings("MethodMayBeStatic")
    protected Response.Status getErrorStatus() {
        return Response.Status.BAD_REQUEST;
    }

    /**
     * Given a string representation, parse it and return an instance of the parameter type.
     *
     * @param input the raw input
     * @return {@code input}, parsed as an instance of {@code T}
     * @throws Exception if there is an error parsing the input
     */
    protected abstract T parse(String input) throws Exception;

    /**
     * Returns the underlying value.
     *
     * @return the underlying value (possibly null)
     */
    public T get() {
        return value;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) { return true; }
        if ((obj == null) || (getClass() != obj.getClass())) { return false; }
        final AbstractParam<?> that = (AbstractParam<?>) obj;
        // Null-safe comparison: value.equals(...) would NPE when parse() returned null.
        return java.util.Objects.equals(value, that.value);
    }

    @Override
    public int hashCode() {
        // Null-safe: returns 0 for a null value instead of throwing.
        return java.util.Objects.hashCode(value);
    }

    @Override
    public String toString() {
        // Null-safe: renders "null" instead of throwing for a null value.
        return String.valueOf(value);
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.params;
/**
 * A parameter encapsulating boolean values. If the query parameter value is {@code "true"},
 * regardless of case, the returned value is {@link Boolean#TRUE}. If the query parameter value is
 * {@code "false"}, regardless of case, the returned value is {@link Boolean#FALSE}. All other
 * values will return a {@code 400 Bad Request} response.
 */
public class BooleanParam extends AbstractParam<Boolean> {

    public BooleanParam(String input) {
        super(input);
    }

    /** Client-facing message used in the 400 response for unparsable values. */
    @Override
    protected String errorMessage(String input, Exception e) {
        return '"' + input + "\" must be \"true\" or \"false\".";
    }

    @Override
    protected Boolean parse(String input) throws Exception {
        if ("true".equalsIgnoreCase(input)) {
            return Boolean.TRUE;
        }
        if ("false".equalsIgnoreCase(input)) {
            return Boolean.FALSE;
        }
        // Throw a standard, descriptive exception rather than a bare Exception;
        // AbstractParam's constructor catches Exception, so this stays compatible.
        throw new IllegalArgumentException("Not a boolean value: " + input);
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.params;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
/**
 * A parameter encapsulating date/time values. All non-parsable values will return a {@code 400 Bad
 * Request} response. All values returned are in UTC.
 */
public class DateTimeParam extends AbstractParam<DateTime> {

    /**
     * Wraps the raw client-supplied date/time string.
     *
     * @param input the raw request value
     */
    public DateTimeParam(String input) {
        super(input);
    }

    /** Parses {@code input} into a Joda {@link DateTime}, normalized to the UTC zone. */
    @Override
    protected DateTime parse(String input) throws Exception {
        final DateTimeZone zone = DateTimeZone.UTC;
        return new DateTime(input, zone);
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.resources;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.apache.hadoop.util.VersionInfo;
import org.codehaus.jettison.json.JSONException;
import org.codehaus.jettison.json.JSONObject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * Jersey Resource for admin operations.
 */
@Path("admin")
public class AdminResource {

    // Lazily-built, cached version response. volatile so a racing request never
    // observes a partially published reference; building it twice is harmless
    // because the content is identical.
    private volatile Response version;

    /**
     * Returns a plain-text dump of every live thread: name, state and stack trace.
     *
     * @return the thread dump as plain text
     */
    @GET
    @Path("stack")
    @Produces(MediaType.TEXT_PLAIN)
    public String getThreadDump() {
        // Walk up to the root thread group so all JVM threads are covered.
        ThreadGroup topThreadGroup = Thread.currentThread().getThreadGroup();
        while (topThreadGroup.getParent() != null) {
            topThreadGroup = topThreadGroup.getParent();
        }

        // activeCount() is only an estimate; pad the array so threads started
        // between the count and the enumerate call are not silently dropped.
        Thread[] threads = new Thread[topThreadGroup.activeCount() * 2 + 1];
        int nr = topThreadGroup.enumerate(threads);

        StringBuilder builder = new StringBuilder();
        for (int i = 0; i < nr; i++) {
            builder.append(threads[i].getName()).append("\nState: ").
                    append(threads[i].getState()).append("\n");
            String stackTrace = StringUtils.join(threads[i].getStackTrace(), "\n");
            builder.append(stackTrace);
        }
        return builder.toString();
    }

    /**
     * Returns the server version and the underlying Hadoop version as JSON.
     *
     * @return a 200 response whose entity carries "Version" and "Hadoop" fields
     * @throws WebApplicationException with a 500 status if the JSON cannot be built
     */
    @GET
    @Path("version")
    @Produces(MediaType.APPLICATION_JSON)
    public Response getVersion() {
        if (version == null) {
            try {
                JSONObject response = new JSONObject();
                response.put("Version", "v0.1"); // todo: get version
                response.put("Hadoop", VersionInfo.getVersion() + "-r" + VersionInfo.getRevision());
                version = Response.ok(response).build();
            } catch (JSONException e) {
                throw new WebApplicationException(
                        Servlets.getErrorResponse(e, Response.Status.INTERNAL_SERVER_ERROR));
            }
        }
        return version;
    }
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.resources;
import com.google.common.base.Preconditions;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.metadata.service.Services;
import org.apache.hadoop.metadata.services.GraphBackedMetadataRepositoryService;
import org.apache.hadoop.metadata.services.MetadataRepositoryService;
import org.apache.hadoop.metadata.web.util.Servlets;
import org.codehaus.jettison.json.JSONObject;
import org.json.simple.JSONValue;
import org.json.simple.parser.ParseException;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.StringWriter;
/**
 * Entity management operations as REST API.
 */
@Path("entities")
public class EntityResource {

    private MetadataRepositoryService repositoryService;

    /**
     * Looks up the graph-backed repository service registered at application startup.
     *
     * @throws RuntimeException if the service has not been initialized
     */
    public EntityResource() {
        repositoryService = Services.get().getService(GraphBackedMetadataRepositoryService.NAME);
        if (repositoryService == null) {
            throw new RuntimeException("graph service is not initialized");
        }
    }

    /**
     * Submits a new entity of the given type, read as JSON from the request body.
     *
     * @param request    HTTP request whose body carries the entity JSON
     * @param entityType type of the entity being submitted
     * @return a 200 response carrying the GUID assigned to the stored entity
     * @throws WebApplicationException with a 400 status on any parse or repository failure
     */
    @POST
    @Path("submit/{entityType}")
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response submit(@Context HttpServletRequest request,
                           @PathParam("entityType") final String entityType) {
        try {
            final String entity = getEntity(request, entityType);
            validateEntity(entity, entityType);

            final String guid = repositoryService.submitEntity(entity, entityType);
            JSONObject response = new JSONObject();
            response.put("GUID", guid);

            return Response.ok(response).build();
        } catch (Exception e) {
            throw new WebApplicationException(
                    Servlets.getErrorResponse(e, Response.Status.BAD_REQUEST));
        }
    }

    /**
     * Reads the request body fully into a string.
     *
     * @param request    HTTP request to consume
     * @param entityType entity type (currently unused; kept for future logging/validation)
     * @return the raw request body
     * @throws IOException on read failure
     */
    private String getEntity(HttpServletRequest request,
                             String entityType) throws IOException {
        StringWriter writer = new StringWriter();
        // Decode with an explicit charset instead of the platform default.
        IOUtils.copy(request.getInputStream(), writer, "UTF-8");
        return writer.toString();
    }

    /**
     * Ensures the payload is non-null and is well-formed JSON.
     *
     * @throws ParseException if the payload is not valid JSON
     */
    private void validateEntity(String entity, String entityType) throws ParseException {
        Preconditions.checkNotNull(entity, "entity cannot be null");
        Preconditions.checkNotNull(entityType, "entity type cannot be null");
        JSONValue.parseWithException(entity);
    }

    /** Fetches the definition of an entity by GUID. TODO: not yet implemented. */
    @GET
    @Path("definition/{guid}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response getEntityDefinition(@PathParam("guid") String guid) {
        return Response.ok().build();
    }

    /**
     * Fetches the stored definition of an entity by type and name.
     *
     * @param entityType type of the entity
     * @param entityName unique name of the entity within its type
     * @return a 200 response whose entity is the stored definition
     */
    @GET
    @Path("definition/{entityType}/{entityName}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response getEntityDefinition(@PathParam("entityType") String entityType,
                                        @PathParam("entityName") String entityName) {
        final String entityDefinition = repositoryService.getEntityDefinition(entityName, entityType);
        return Response.ok(entityDefinition).build();
    }

    /** Validates a candidate entity payload without storing it. TODO: not yet implemented. */
    @POST
    @Path("validate/{entityType}")
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response validate(@Context HttpServletRequest request,
                             @PathParam("entityType") String entityType) {
        return Response.ok().build();
    }

    /** Deletes an entity by type and name. TODO: not yet implemented. */
    @DELETE
    @Path("delete/{entityType}/{entityName}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response delete(
            @Context HttpServletRequest request,
            @PathParam("entityType") final String entityType,
            @PathParam("entityName") final String entityName) {
        return Response.ok().build();
    }

    /** Updates an entity by type and name. TODO: not yet implemented. */
    @POST
    @Path("update/{entityType}/{entityName}")
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response update(@Context HttpServletRequest request,
                           @PathParam("entityType") final String entityType,
                           @PathParam("entityName") final String entityName) {
        return Response.ok().build();
    }

    /** Returns the status of an entity. TODO: not yet implemented. */
    @GET
    @Path("status/{entityType}/{entityName}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response getStatus(@PathParam("entityType") String entityType,
                              @PathParam("entityName") String entityName) {
        return Response.ok().build();
    }

    /** Returns the dependencies of an entity. TODO: not yet implemented. */
    @GET
    @Path("dependencies/{entityType}/{entityName}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response getDependencies(@PathParam("entityType") String entityType,
                                    @PathParam("entityName") String entityName) {
        return Response.ok().build();
    }

    /** Lists entities of a type, paged. TODO: not yet implemented. */
    @GET
    @Path("list/{entityType}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response getEntityList(@PathParam("entityType") String entityType,
                                  @DefaultValue("0") @QueryParam("offset") Integer offset,
                                  @QueryParam("numResults") Integer resultsPerPage) {
        return Response.ok().build();
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.resources;
import javax.ws.rs.Path;
/**
 * Jersey Resource for metadata operations.
 */
@Path("discovery")
public class MetadataDiscoveryResource {
    // TODO: search/discovery endpoints are not implemented yet; this class only
    // reserves the "discovery" path for the Jersey package scanner.
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.resources;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * This class provides RESTful API for Types.
 *
 * All operations are currently stubs; see the TODO notes on each method.
 */
@Path("types")
public class TypesResource {

    /**
     * Submits a type definition. TODO: not yet implemented — always returns 200.
     *
     * @param request HTTP request carrying the type definition (XML)
     * @param type    name of the type being submitted
     */
    @POST
    @Path("submit/{type}")
    @Consumes(MediaType.TEXT_XML)
    @Produces(MediaType.APPLICATION_JSON)
    public Response submit(@Context HttpServletRequest request,
                           @PathParam("type") String type) {
        return Response.ok().build();
    }

    /**
     * Deletes a type. Currently rejected outright with 400 while the design
     * question below is open.
     */
    @DELETE
    @Path("delete/{type}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response delete(@Context HttpServletRequest request,
                           @PathParam("type") String type) {
        // todo - should this be supported?
        return Response.status(Response.Status.BAD_REQUEST).build();
    }

    /** Updates a type definition. TODO: not yet implemented — always returns 200. */
    @POST
    @Path("update/{type}")
    @Produces(MediaType.APPLICATION_JSON)
    public Response update(@Context HttpServletRequest request,
                           @PathParam("type") String type) {
        return Response.ok().build();
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.service;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.bio.SocketConnector;
import org.mortbay.jetty.webapp.WebAppContext;
/**
 * This class embeds a Jetty server and a connector.
 */
public class EmbeddedServer {

    // Fallback header/request buffer size (bytes) when none is configured.
    private static final int DEFAULT_BUFFER_SIZE = 16192;

    protected final Server server = new Server();

    /**
     * Builds a server listening on {@code port} that serves the web application
     * rooted at {@code path}.
     *
     * NOTE: intentionally calls the overridable {@link #getConnector(int)} from the
     * constructor — subclasses (e.g. the secure variant) rely on this to install
     * their own connector.
     */
    public EmbeddedServer(int port, String path) {
        server.addConnector(getConnector(port));
        server.setHandler(new WebAppContext(path, "/"));
    }

    /** Creates a plain socket connector bound to all interfaces on the given port. */
    protected Connector getConnector(int port) {
        SocketConnector plain = new SocketConnector();
        plain.setPort(port);
        plain.setHost("0.0.0.0");

        // this is to enable large header sizes when Kerberos is enabled with AD
        final int bufferSize = getBufferSize();
        plain.setHeaderBufferSize(bufferSize);
        plain.setRequestBufferSize(bufferSize);
        return plain;
    }

    /**
     * Reads {@code metadata.jetty.request.buffer.size} from application.properties,
     * falling back to {@link #DEFAULT_BUFFER_SIZE} when the file is absent or unreadable.
     */
    private Integer getBufferSize() {
        try {
            return new PropertiesConfiguration("application.properties")
                    .getInt("metadata.jetty.request.buffer.size", DEFAULT_BUFFER_SIZE);
        } catch (ConfigurationException ignored) {
            // Best effort: a missing configuration file simply means the default size.
            return DEFAULT_BUFFER_SIZE;
        }
    }

    /** Starts the server and blocks the calling thread until it shuts down. */
    public void start() throws Exception {
        server.start();
        server.join();
    }

    /** Stops the server. */
    public void stop() throws Exception {
        server.stop();
    }

    /** Factory: returns a TLS-enabled server when {@code secure}, a plain one otherwise. */
    public static EmbeddedServer newServer(int port, String path, boolean secure) {
        return secure
                ? new SecureEmbeddedServer(port, path)
                : new EmbeddedServer(port, path);
    }
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.service;
import org.apache.commons.configuration.ConfigurationException;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.security.SslSocketConnector;
/**
* This is a jetty server which requires client auth via certificates.
*/
public class SecureEmbeddedServer extends EmbeddedServer {
public SecureEmbeddedServer(int port, String path) {
super(port, path);
}
protected Connector getConnector(int port) {
PropertiesConfiguration config = getConfiguration();
SslSocketConnector connector = new SslSocketConnector();
connector.setPort(port);
connector.setHost("0.0.0.0");
connector.setKeystore(config.getString("keystore.file",
System.getProperty("keystore.file", "conf/metadata.keystore")));
connector.setKeyPassword(config.getString("keystore.password",
System.getProperty("keystore.password", "metadata-passwd")));
connector.setTruststore(config.getString("truststore.file",
System.getProperty("truststore.file", "conf/metadata.keystore")));
connector.setTrustPassword(config.getString("truststore.password",
System.getProperty("truststore.password", "metadata-passwd")));
connector.setPassword(config.getString("password",
System.getProperty("password", "metadata-passwd")));
connector.setWantClientAuth(true);
return connector;
}
private PropertiesConfiguration getConfiguration() {
try {
return new PropertiesConfiguration("application.properties");
} catch (ConfigurationException e) {
throw new RuntimeException("Unable to load configuration: application.properties");
}
}
}
\ No newline at end of file
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata.web.util;
import org.apache.commons.lang.StringUtils;
import org.codehaus.jettison.json.JSONObject;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
 * Utility functions for dealing with servlets.
 */
public final class Servlets {

    public static final String REQUEST_ID = "requestId";

    private Servlets() {
        /* singleton */
    }

    /**
     * Returns the user of the given request.
     *
     * Checks, in order: the container-authenticated remote user, the
     * "user.name" query parameter, and the legacy "Remote-User" header.
     *
     * @param httpRequest an HTTP servlet request
     * @return the user, or null if none of the sources yields a non-empty value
     */
    public static String getUserFromRequest(HttpServletRequest httpRequest) {
        String remoteUser = httpRequest.getRemoteUser();
        if (StringUtils.isEmpty(remoteUser)) {
            remoteUser = httpRequest.getParameter("user.name"); // available in query-param
        }
        if (StringUtils.isEmpty(remoteUser)) {
            remoteUser = httpRequest.getHeader("Remote-User"); // backwards-compatibility
        }
        return StringUtils.isEmpty(remoteUser) ? null : remoteUser;
    }

    /**
     * Returns the URI of the given request.
     *
     * @param httpRequest an HTTP servlet request
     * @return the URI, including the query string
     */
    public static String getRequestURI(HttpServletRequest httpRequest) {
        return withQueryString(httpRequest, httpRequest.getRequestURI());
    }

    /**
     * Returns the full URL of the given request.
     *
     * @param httpRequest an HTTP servlet request
     * @return the full URL, including the query string
     */
    public static String getRequestURL(HttpServletRequest httpRequest) {
        return withQueryString(httpRequest, httpRequest.getRequestURL());
    }

    /** Appends the request's query string, if present, to the given base URI/URL. */
    private static String withQueryString(HttpServletRequest httpRequest, CharSequence base) {
        final StringBuilder url = new StringBuilder(100).append(base);
        if (httpRequest.getQueryString() != null) {
            url.append('?').append(httpRequest.getQueryString());
        }
        return url.toString();
    }

    /** Builds a JSON error response from the throwable's message. */
    public static Response getErrorResponse(Throwable e, Response.Status status) {
        return getErrorResponse(e.getMessage(), status);
    }

    /** Builds a JSON error response carrying the quoted message. */
    public static Response getErrorResponse(String message, Response.Status status) {
        return Response.status(status)
                .entity(JSONObject.quote(message))
                .type(MediaType.APPLICATION_JSON)
                .build();
    }
}
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
application.services=org.apache.hadoop.metadata.services.TitanGraphService,org.apache.hadoop.metadata.services.GraphBackedMetadataRepositoryService
#metadata.graph.schema.default=none
# Graph implementation
metadata.graph.blueprints.graph=com.thinkaurelius.titan.core.TitanFactory
# Graph Storage
metadata.graph.storage.backend=berkeleyje
metadata.graph.storage.directory=${user.dir}/target/data/graphdb
# Graph Search Index
#metadata.graph.index.name=search
#metadata.graph.index.search.backend=elasticsearch
#metadata.graph.index.search.directory=${user.dir}/target/data/searchindex
#metadata.graph.index.search.elasticsearch.client-only=false
#metadata.graph.index.search.elasticsearch.local-mode=true
metadata.enableTLS=false
<?xml version="1.0" encoding="UTF-8" ?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
<appender name="console" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
</layout>
</appender>
<appender name="FILE" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/application.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %-5p - [%t:%x] ~ %m (%c{1}:%L)%n"/>
</layout>
</appender>
<appender name="AUDIT" class="org.apache.log4j.DailyRollingFileAppender">
<param name="File" value="${user.dir}/target/logs/audit.log"/>
<param name="Append" value="true"/>
<param name="Threshold" value="debug"/>
<layout class="org.apache.log4j.PatternLayout">
<param name="ConversionPattern" value="%d %x %m%n"/>
</layout>
</appender>
<logger name="org.apache.hadoop.metadata" additivity="false">
<level value="debug"/>
<appender-ref ref="console"/>
<appender-ref ref="FILE"/>
</logger>
<logger name="AUDIT">
<level value="info"/>
<appender-ref ref="console"/>
<appender-ref ref="AUDIT"/>
</logger>
<root>
<priority value="info"/>
<appender-ref ref="console"/>
<appender-ref ref="FILE"/>
</root>
</log4j:configuration>
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
######################
*.domain=all
*.build.user=${user.name}
*.build.epoch=${timestamp}
*.project.version=${pom.version}
*.build.version=${pom.version}-r${buildNumber}
*.vc.revision=${buildNumber}
*.vc.source.url=${scm.connection}
######################
<?xml version="1.0" encoding="UTF-8"?>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE web-app PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"
"http://java.sun.com/dtd/web-app_2_3.dtd">
<web-app>
<display-name>Apache Metadata</display-name>
<description>Apache Metadata server web application</description>
<filter>
<filter-name>audit</filter-name>
<filter-class>org.apache.hadoop.metadata.web.filters.AuditFilter</filter-class>
</filter>
<filter>
<filter-name>authentication</filter-name>
<filter-class>org.apache.hadoop.metadata.web.filters.AuthenticationFilter</filter-class>
</filter>
<filter-mapping>
<filter-name>audit</filter-name>
<servlet-name>MetadataRESTApi</servlet-name>
</filter-mapping>
<filter-mapping>
<filter-name>authentication</filter-name>
<servlet-name>MetadataRESTApi</servlet-name>
</filter-mapping>
<listener>
<listener-class>org.apache.hadoop.metadata.web.listeners.ApplicationStartupListener</listener-class>
</listener>
<servlet>
<servlet-name>MetadataRESTApi</servlet-name>
<servlet-class>com.sun.jersey.spi.container.servlet.ServletContainer</servlet-class>
<init-param>
<param-name>com.sun.jersey.config.property.resourceConfigClass</param-name>
<param-value>com.sun.jersey.api.core.PackagesResourceConfig</param-value>
</init-param>
<init-param>
<param-name>com.sun.jersey.config.property.packages</param-name>
<param-value>
org.apache.hadoop.metadata.web.resources,org.apache.hadoop.metadata.web.params
</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<servlet-mapping>
<servlet-name>MetadataRESTApi</servlet-name>
<url-pattern>/api/metadata/*</url-pattern>
</servlet-mapping>
</web-app>
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<!DOCTYPE HTML>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta name="Date-Revision-yyyymmdd" content="20130821" />
<meta http-equiv="Content-Language" content="en" />
<title>Apache Metadata - Data management and processing platform</title>
</head>
<body class="topBarEnabled">
<h1> Apache Metadata</h1>
More information at: <a href="http://dgc.incubator.apache.org/index.html" title="About">Project Website</a>
</body>
</html>
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metadata;
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;
import com.sun.jersey.api.client.WebResource;
import com.sun.jersey.api.client.config.DefaultClientConfig;
import org.json.simple.JSONValue;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.UriBuilder;
import java.util.HashMap;
import java.util.Map;
public class TestDriver {

    /** Default service endpoint used when no URL is supplied on the command line. */
    private static final String DEFAULT_BASE_URL = "http://localhost:15000/";

    /**
     * Smoke-test driver for the metadata REST API: submits a sample
     * {@code hive-table} entity, then fetches its definition back, printing
     * the raw server response for each call.
     *
     * @param args optional; {@code args[0]} overrides the default base URL
     *             ({@value #DEFAULT_BASE_URL})
     * @throws Exception on any client construction or connection failure
     */
    public static void main(String[] args) throws Exception {
        // Allow the endpoint to be overridden, e.g. when the service is not local.
        String baseUrl = args.length > 0 ? args[0] : DEFAULT_BASE_URL;

        DefaultClientConfig config = new DefaultClientConfig();
        Client client = Client.create(config);
        WebResource service = client.resource(UriBuilder.fromUri(baseUrl).build());

        final String entityName = "clicks-table";
        final String entityType = "hive-table";

        submitEntity(service, entityName, entityType);
        getEntityDefinition(service, entityName, entityType);
    }

    /**
     * GETs the definition of a previously submitted entity from
     * {@code api/metadata/entities/definition/{type}/{name}} and prints the
     * raw response body.
     */
    private static void getEntityDefinition(WebResource service,
                                            String entityName, String entityType) {
        ClientResponse clientResponse = service
                .path("api/metadata/entities/definition")
                .path(entityType)
                .path(entityName)
                .accept(MediaType.APPLICATION_JSON)
                .type(MediaType.APPLICATION_JSON)
                .method(HttpMethod.GET, ClientResponse.class);
        System.out.println("response = " + clientResponse.getEntity(String.class));
    }

    /**
     * POSTs a small sample entity, serialized as a JSON object of string
     * properties, to {@code api/metadata/entities/submit/{type}} and prints
     * the raw response body.
     */
    private static void submitEntity(WebResource service,
                                     String entityName, String entityType) {
        Map<String, String> props = new HashMap<>();
        props.put("entityName", entityName);
        props.put("entityType", entityType);
        props.put("database", "foo");
        props.put("blah", "blah");
        String entityJson = JSONValue.toJSONString(props);

        ClientResponse clientResponse = service
                .path("api/metadata/entities/submit")
                .path(entityType)
                .accept(MediaType.APPLICATION_JSON)
                .type(MediaType.APPLICATION_JSON)
                .method(HttpMethod.POST, ClientResponse.class, entityJson);
        System.out.println("response = " + clientResponse.getEntity(String.class));
    }
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment