Commit ebc4502a by Suma Shivaprasad

ATLAS-114 Upgrade Hbase client to 1.1.2(sumasai)

parent 5bc6f6bd
...@@ -248,3 +248,20 @@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWIS ...@@ -248,3 +248,20 @@ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWIS
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE. POSSIBILITY OF SUCH DAMAGE.
-----------------------------------------------------------------------
Titan Apache 2.0 License
-----------------------------------------------------------------------
Copyright 2012-2013 Aurelius LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
# limitations under the License. # limitations under the License.
import getpass import getpass
import os import os
import re
import platform import platform
import subprocess import subprocess
from threading import Thread from threading import Thread
...@@ -31,7 +32,7 @@ CONF = "conf" ...@@ -31,7 +32,7 @@ CONF = "conf"
LOG="logs" LOG="logs"
WEBAPP="server" + os.sep + "webapp" WEBAPP="server" + os.sep + "webapp"
DATA="data" DATA="data"
ENV_KEYS = ["JAVA_HOME", "METADATA_OPTS", "METADATA_LOG_DIR", "METADATA_PID_DIR", "METADATA_CONF", "METADATACPPATH", "METADATA_DATA_DIR", "METADATA_HOME_DIR", "METADATA_EXPANDED_WEBAPP_DIR"] ENV_KEYS = ["JAVA_HOME", "METADATA_OPTS", "METADATA_LOG_DIR", "METADATA_PID_DIR", "METADATA_CONF", "METADATACPPATH", "METADATA_DATA_DIR", "METADATA_HOME_DIR", "METADATA_EXPANDED_WEBAPP_DIR", "HBASE_CONF_DIR"]
METADATA_CONF = "METADATA_CONF" METADATA_CONF = "METADATA_CONF"
METADATA_LOG = "METADATA_LOG_DIR" METADATA_LOG = "METADATA_LOG_DIR"
METADATA_PID = "METADATA_PID_DIR" METADATA_PID = "METADATA_PID_DIR"
...@@ -39,6 +40,7 @@ METADATA_WEBAPP = "METADATA_EXPANDED_WEBAPP_DIR" ...@@ -39,6 +40,7 @@ METADATA_WEBAPP = "METADATA_EXPANDED_WEBAPP_DIR"
METADATA_OPTS = "METADATA_OPTS" METADATA_OPTS = "METADATA_OPTS"
METADATA_DATA = "METADATA_DATA_DIR" METADATA_DATA = "METADATA_DATA_DIR"
METADATA_HOME = "METADATA_HOME_DIR" METADATA_HOME = "METADATA_HOME_DIR"
HBASE_CONF_DIR = "HBASE_CONF_DIR"
IS_WINDOWS = platform.system() == "Windows" IS_WINDOWS = platform.system() == "Windows"
ON_POSIX = 'posix' in sys.builtin_module_names ON_POSIX = 'posix' in sys.builtin_module_names
DEBUG = False DEBUG = False
...@@ -60,6 +62,10 @@ def confDir(dir): ...@@ -60,6 +62,10 @@ def confDir(dir):
localconf = os.path.join(dir, CONF) localconf = os.path.join(dir, CONF)
return os.environ.get(METADATA_CONF, localconf) return os.environ.get(METADATA_CONF, localconf)
def hbaseConfDir(atlasConfDir):
    """Resolve the HBase client configuration directory.

    Honors the HBASE_CONF_DIR environment variable when set; otherwise
    falls back to <parent-of-atlas-conf>/hbase/conf.
    """
    fallback = os.path.join(os.path.dirname(atlasConfDir), "hbase", CONF)
    return os.environ.get(HBASE_CONF_DIR, fallback)
def logDir(dir): def logDir(dir):
localLog = os.path.join(dir, LOG) localLog = os.path.join(dir, LOG)
return os.environ.get(METADATA_LOG, localLog) return os.environ.get(METADATA_LOG, localLog)
...@@ -325,3 +331,9 @@ def server_already_running(pid): ...@@ -325,3 +331,9 @@ def server_already_running(pid):
def server_pid_not_running(pid): def server_pid_not_running(pid):
print "The Server is no longer running with pid %s" %pid print "The Server is no longer running with pid %s" %pid
def grep(file, value):
    """Return the first line of *file* whose beginning matches regex *value*.

    Returns None when no line matches.  The regex is anchored at the start
    of each line (re.match semantics); the returned line keeps its trailing
    newline.
    """
    # NOTE: parameter name 'file' shadows the builtin but is kept for
    # backward compatibility with existing callers.
    # Use a context manager so the handle is always closed, and iterate the
    # file lazily instead of materializing every line via readlines().
    with open(file) as f:
        for line in f:
            if re.match(value, line):
                return line
    return None
...@@ -25,6 +25,8 @@ METADATA_LOG_OPTS="-Datlas.log.dir=%s -Datlas.log.file=application.log" ...@@ -25,6 +25,8 @@ METADATA_LOG_OPTS="-Datlas.log.dir=%s -Datlas.log.file=application.log"
METADATA_COMMAND_OPTS="-Datlas.home=%s" METADATA_COMMAND_OPTS="-Datlas.home=%s"
METADATA_CONFIG_OPTS="-Datlas.conf=%s" METADATA_CONFIG_OPTS="-Datlas.conf=%s"
DEFAULT_JVM_OPTS="-Xmx1024m -XX:MaxPermSize=512m -Dlog4j.configuration=atlas-log4j.xml" DEFAULT_JVM_OPTS="-Xmx1024m -XX:MaxPermSize=512m -Dlog4j.configuration=atlas-log4j.xml"
CONF_FILE="application.properties"
HBASE_STORAGE_CONF_ENTRY="atlas.graph.storage.backend\s*=\s*hbase"
def main(): def main():
...@@ -50,12 +52,21 @@ def main(): ...@@ -50,12 +52,21 @@ def main():
web_app_dir = mc.webAppDir(metadata_home) web_app_dir = mc.webAppDir(metadata_home)
mc.expandWebApp(metadata_home) mc.expandWebApp(metadata_home)
#add hbase-site.xml to classpath
hbase_conf_dir = mc.hbaseConfDir(confdir)
p = os.pathsep p = os.pathsep
metadata_classpath = confdir + p \ metadata_classpath = confdir + p \
+ os.path.join(web_app_dir, "atlas", "WEB-INF", "classes" ) + p \ + os.path.join(web_app_dir, "atlas", "WEB-INF", "classes" ) + p \
+ os.path.join(web_app_dir, "atlas", "WEB-INF", "lib", "*" ) + p \ + os.path.join(web_app_dir, "atlas", "WEB-INF", "lib", "*" ) + p \
+ os.path.join(metadata_home, "libext", "*") + os.path.join(metadata_home, "libext", "*")
if os.path.exists(hbase_conf_dir):
metadata_classpath = metadata_classpath + p \
+ hbase_conf_dir
else:
storage_backend = mc.grep(os.path.join(confdir, CONF_FILE), HBASE_STORAGE_CONF_ENTRY)
if storage_backend != None:
raise Exception("Could not find hbase-site.xml in %s. Please set env var HBASE_CONF_DIR to the hbase client conf dir", hbase_conf_dir)
metadata_pid_file = mc.pidFile(metadata_home) metadata_pid_file = mc.pidFile(metadata_home)
......
...@@ -25,7 +25,9 @@ atlas.graph.storage.directory=${sys:atlas.home}/data/berkley ...@@ -25,7 +25,9 @@ atlas.graph.storage.directory=${sys:atlas.home}/data/berkley
#hbase #hbase
#For standalone mode , specify localhost #For standalone mode , specify localhost
#for distributed mode, specify zookeeper quorum here - For more information refer http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2 #for distributed mode, specify zookeeper quorum here - For more information refer http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2
#atlas.graph.storage.hostname=localhost atlas.graph.storage.hostname=localhost
atlas.graph.storage.hbase.regions-per-server=1
atlas.graph.storage.lock.wait-time=10000
#Solr #Solr
#atlas.graph.index.search.backend=solr #atlas.graph.index.search.backend=solr
......
...@@ -33,10 +33,19 @@ atlas.graph.storage.backend=hbase ...@@ -33,10 +33,19 @@ atlas.graph.storage.backend=hbase
atlas.graph.storage.hostname=<ZooKeeper Quorum> atlas.graph.storage.hostname=<ZooKeeper Quorum>
</verbatim> </verbatim>
The HBASE_CONF_DIR environment variable needs to be set to point to the HBase client configuration directory, which is added to the classpath when Atlas starts up.
hbase-site.xml needs to have the following properties set according to the cluster setup
<verbatim>
#Set below to /hbase-secure if the HBase server is set up in secure mode
zookeeper.znode.parent=/hbase-unsecure
</verbatim>
Advanced configuration Advanced configuration
# If you are planning to use any of the configs mentioned below, they need to be prefixed with "atlas.graph." to take effect in ATLAS
Refer http://s3.thinkaurelius.com/docs/titan/0.5.4/titan-config-ref.html#_storage_hbase Refer http://s3.thinkaurelius.com/docs/titan/0.5.4/titan-config-ref.html#_storage_hbase
---++++ Graph Search Index ---++++ Graph Search Index
This section sets up the graph db - titan - to use an search indexing system. The example This section sets up the graph db - titan - to use an search indexing system. The example
configuration below setsup to use an embedded Elastic search indexing system. configuration below setsup to use an embedded Elastic search indexing system.
......
...@@ -121,6 +121,16 @@ and change it to look as below ...@@ -121,6 +121,16 @@ and change it to look as below
export METADATA_SERVER_OPTS="-Djava.awt.headless=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc=" export METADATA_SERVER_OPTS="-Djava.awt.headless=true -Djava.security.krb5.realm= -Djava.security.krb5.kdc="
</verbatim> </verbatim>
* Hbase as the Storage Backend for the Graph Repository
By default, Atlas uses Titan as the graph repository and is the only graph repository implementation available currently.
The HBase versions currently supported are 0.98.x, 1.0.x, 1.1.x. For configuring ATLAS graph persistence on HBase, please go through the "Configuration - Graph persistence engine - HBase" section
for more details.
Pre-requisites for running HBase as a distributed cluster
* 3 or 5 ZooKeeper nodes
   * At least 3 RegionServer nodes. It would be ideal to run the DataNodes on the same hosts as the Region servers for data locality.
* Configuring SOLR as the Indexing Backend for the Graph Repository * Configuring SOLR as the Indexing Backend for the Graph Repository
By default, Atlas uses Titan as the graph repository and is the only graph repository implementation available currently. By default, Atlas uses Titan as the graph repository and is the only graph repository implementation available currently.
...@@ -152,6 +162,13 @@ For configuring Titan to work with Solr, please follow the instructions below ...@@ -152,6 +162,13 @@ For configuring Titan to work with Solr, please follow the instructions below
For more information on Titan solr configuration , please refer http://s3.thinkaurelius.com/docs/titan/0.5.4/solr.htm For more information on Titan solr configuration , please refer http://s3.thinkaurelius.com/docs/titan/0.5.4/solr.htm
Pre-requisites for running Solr in cloud mode
* Memory - Solr is both memory and CPU intensive. Make sure the server running Solr has adequate memory, CPU and disk.
     Solr works well with 32GB RAM. Plan to provide as much memory as possible to the Solr process.
* Disk - If the number of entities that need to be stored are large, plan to have at least 500 GB free space in the volume where Solr is going to store the index data
* SolrCloud has support for replication and sharding. It is highly recommended to use SolrCloud with at least two Solr nodes running on different servers with replication enabled.
If using SolrCloud, then you also need ZooKeeper installed and configured with 3 or 5 ZooKeeper nodes
*Starting Atlas Server* *Starting Atlas Server*
<verbatim> <verbatim>
bin/atlas_start.py [-port <port>] bin/atlas_start.py [-port <port>]
......
...@@ -328,7 +328,7 @@ ...@@ -328,7 +328,7 @@
<tinkerpop.version>2.6.0</tinkerpop.version> <tinkerpop.version>2.6.0</tinkerpop.version>
<titan.version>0.5.4</titan.version> <titan.version>0.5.4</titan.version>
<hadoop.version>2.7.0</hadoop.version> <hadoop.version>2.7.0</hadoop.version>
<hbase.version>0.98.9-hadoop2</hbase.version> <hbase.version>1.1.2</hbase.version>
<solr.version>5.1.0</solr.version> <solr.version>5.1.0</solr.version>
<kafka.version>0.8.2.0</kafka.version> <kafka.version>0.8.2.0</kafka.version>
<!-- scala versions --> <!-- scala versions -->
...@@ -1512,6 +1512,7 @@ ...@@ -1512,6 +1512,7 @@
<exclude>**/build.log</exclude> <exclude>**/build.log</exclude>
<exclude>.bowerrc</exclude> <exclude>.bowerrc</exclude>
<exclude>*.json</exclude> <exclude>*.json</exclude>
<exclude>**/overlays/**</exclude>
</excludes> </excludes>
</configuration> </configuration>
<executions> <executions>
......
...@@ -9,6 +9,7 @@ ATLAS-54 Rename configs in hive hook (shwethags) ...@@ -9,6 +9,7 @@ ATLAS-54 Rename configs in hive hook (shwethags)
ATLAS-3 Mixed Index creation fails with Date types (sumasai via shwethags) ATLAS-3 Mixed Index creation fails with Date types (sumasai via shwethags)
ALL CHANGES: ALL CHANGES:
ATLAS-114 Upgrade hbase client to 1.1.2 (sumasai)
ATLAS-296 IllegalArgumentException during hive HiveHookIT integration tests (tbeerbower via shwethags) ATLAS-296 IllegalArgumentException during hive HiveHookIT integration tests (tbeerbower via shwethags)
ATLAS-158 Provide Atlas Entity Change Notification (tbeerbower via shwethags) ATLAS-158 Provide Atlas Entity Change Notification (tbeerbower via shwethags)
ATALS-238 atlas_start.py- the Atlas server won’t restart after improper shutdown(ndjouri via sumasai) ATALS-238 atlas_start.py- the Atlas server won’t restart after improper shutdown(ndjouri via sumasai)
......
...@@ -114,10 +114,11 @@ ...@@ -114,10 +114,11 @@
<artifactId>titan-berkeleyje</artifactId> <artifactId>titan-berkeleyje</artifactId>
</dependency> </dependency>
<dependency> <!-- Commenting out since titan-hbase classes are shaded for 1.x support -->
<groupId>com.thinkaurelius.titan</groupId> <!--<dependency>-->
<artifactId>titan-hbase</artifactId> <!--<groupId>com.thinkaurelius.titan</groupId>-->
</dependency> <!--<artifactId>titan-hbase</artifactId>-->
<!--</dependency>-->
<dependency> <dependency>
<groupId>org.apache.hbase</groupId> <groupId>org.apache.hbase</groupId>
......
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
/**
 * Version-compatibility facade over HBase's Admin/HBaseAdmin API, which has
 * broken source and binary compatibility repeatedly between the 0.94 and
 * 1.x release lines.  Implementations adapt a concrete admin handle for a
 * particular HBase version.
 */
public interface AdminMask extends Closeable {

    /** Remove all cells with timestamps at or below {@code timestamp} from {@code tableName}. */
    void clearTable(String tableName, long timestamp) throws IOException;

    /** Fetch the descriptor of an existing table. */
    HTableDescriptor getTableDescriptor(String tableName) throws TableNotFoundException, IOException;

    /** Whether the named table exists. */
    boolean tableExists(String tableName) throws IOException;

    /** Create a table with default region splits. */
    void createTable(HTableDescriptor desc) throws IOException;

    /** Create a table pre-split into {@code numRegions} regions over [startKey, endKey]. */
    void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException;

    /**
     * Estimate the number of regionservers in the HBase cluster.
     *
     * This is usually implemented by calling
     * {@link HBaseAdmin#getClusterStatus()} and then
     * {@link ClusterStatus#getServers()} and finally {@code size()} on the
     * returned server list.
     *
     * @return the number of servers in the cluster or -1 if it could not be determined
     */
    int getEstimatedRegionServerCount();

    /** Disable (take offline) the named table. */
    void disableTable(String tableName) throws IOException;

    /** Enable (bring online) the named table. */
    void enableTable(String tableName) throws IOException;

    /** Whether the named table is currently disabled. */
    boolean isTableDisabled(String tableName) throws IOException;

    /** Add a new column family to an existing table. */
    void addColumn(String tableName, HColumnDescriptor columnDescriptor) throws IOException;
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.Closeable;
import java.io.IOException;
/**
 * Version-compatibility facade over HBase's (H)Connection type, whose API
 * changed incompatibly between the 0.94 and 1.x release lines.
 * Implementations wrap the connection class of one concrete HBase version.
 */
public interface ConnectionMask extends Closeable {

    /** Obtain a handle to the named table through this connection. */
    TableMask getTable(String name) throws IOException;

    /** Obtain an admin handle for this connection's cluster. */
    AdminMask getAdmin() throws IOException;
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.thinkaurelius.titan.util.system.IOUtils;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
/**
 * {@link AdminMask} implementation backed by the HBase 0.98 {@link HBaseAdmin}
 * API.  Apart from {@link #clearTable(String, long)}, every operation is a
 * straight delegation to the wrapped admin handle.
 */
public class HBaseAdmin0_98 implements AdminMask {

    private static final Logger log = LoggerFactory.getLogger(HBaseAdmin0_98.class);

    // Underlying 0.98-era admin handle; all operations delegate to it.
    private final HBaseAdmin adm;

    public HBaseAdmin0_98(HBaseAdmin adm) {
        this.adm = adm;
    }

    @Override
    public void clearTable(String tableName, long timestamp) throws IOException {
        if (!adm.tableExists(tableName)) {
            log.debug("clearStorage() called before table {} was created, skipping.", tableName);
            return;
        }

        // Unfortunately, linear scanning and deleting tables is faster in HBase < 1 when running integration tests than
        // disabling and deleting tables.
        HTable table = null;
        try {
            table = new HTable(adm.getConfiguration(), tableName);

            Scan fullScan = new Scan();
            fullScan.setBatch(100);
            fullScan.setCacheBlocks(false);
            fullScan.setCaching(2000);
            fullScan.setTimeRange(0, Long.MAX_VALUE);
            fullScan.setMaxVersions(1);

            ResultScanner rows = null;
            try {
                rows = table.getScanner(fullScan);
                // Delete every row at or below the supplied timestamp.
                for (Result row : rows) {
                    Delete rowDelete = new Delete(row.getRow());
                    rowDelete.setTimestamp(timestamp);
                    table.delete(rowDelete);
                }
            } finally {
                IOUtils.closeQuietly(rows);
            }
        } finally {
            IOUtils.closeQuietly(table);
        }
    }

    @Override
    public HTableDescriptor getTableDescriptor(String tableName) throws TableNotFoundException, IOException {
        return adm.getTableDescriptor(tableName.getBytes());
    }

    @Override
    public boolean tableExists(String tableName) throws IOException {
        return adm.tableExists(tableName);
    }

    @Override
    public void createTable(HTableDescriptor desc) throws IOException {
        adm.createTable(desc);
    }

    @Override
    public void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
        adm.createTable(desc, startKey, endKey, numRegions);
    }

    @Override
    public int getEstimatedRegionServerCount() {
        // Best-effort: -1 signals "unknown" when cluster status is unavailable.
        int count = -1;
        try {
            count = adm.getClusterStatus().getServers().size();
            log.debug("Read {} servers from HBase ClusterStatus", count);
        } catch (IOException e) {
            log.debug("Unable to retrieve HBase cluster status", e);
        }
        return count;
    }

    @Override
    public void disableTable(String tableName) throws IOException {
        adm.disableTable(tableName);
    }

    @Override
    public void enableTable(String tableName) throws IOException {
        adm.enableTable(tableName);
    }

    @Override
    public boolean isTableDisabled(String tableName) throws IOException {
        return adm.isTableDisabled(tableName);
    }

    @Override
    public void addColumn(String tableName, HColumnDescriptor columnDescriptor) throws IOException {
        adm.addColumn(tableName, columnDescriptor);
    }

    @Override
    public void close() throws IOException {
        adm.close();
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
/**
 * {@link AdminMask} implementation backed by the HBase 1.0+ {@link Admin}
 * API, which addresses tables through {@link TableName} rather than raw
 * strings.  Apart from {@link #clearTable(String, long)}, every operation
 * simply converts the name and delegates.
 */
public class HBaseAdmin1_0 implements AdminMask {

    private static final Logger log = LoggerFactory.getLogger(HBaseAdmin1_0.class);

    // Underlying 1.0+ admin handle; all operations delegate to it.
    private final Admin adm;

    public HBaseAdmin1_0(HBaseAdmin adm) {
        this.adm = adm;
    }

    @Override
    public void clearTable(String tableString, long timestamp) throws IOException {
        TableName tableName = TableName.valueOf(tableString);

        if (!adm.tableExists(tableName)) {
            log.debug("Attempted to clear table {} before it exists (noop)", tableString);
            return;
        }

        // Truncation requires the table to be disabled first.
        if (!adm.isTableDisabled(tableName)) {
            adm.disableTable(tableName);
        }
        if (!adm.isTableDisabled(tableName)) {
            throw new RuntimeException("Unable to disable table " + tableName);
        }

        // This API call appears to both truncate and reenable the table.
        log.info("Truncating table {}", tableName);
        adm.truncateTable(tableName, true /* preserve splits */);

        try {
            adm.enableTable(tableName);
        } catch (TableNotDisabledException e) {
            // This triggers seemingly every time in testing with 1.0.2.
            log.debug("Table automatically reenabled by truncation: {}", tableName, e);
        }
    }

    @Override
    public HTableDescriptor getTableDescriptor(String tableString) throws TableNotFoundException, IOException {
        return adm.getTableDescriptor(TableName.valueOf(tableString));
    }

    @Override
    public boolean tableExists(String tableString) throws IOException {
        return adm.tableExists(TableName.valueOf(tableString));
    }

    @Override
    public void createTable(HTableDescriptor desc) throws IOException {
        adm.createTable(desc);
    }

    @Override
    public void createTable(HTableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException {
        adm.createTable(desc, startKey, endKey, numRegions);
    }

    @Override
    public int getEstimatedRegionServerCount() {
        // Best-effort: -1 signals "unknown" when cluster status is unavailable.
        int count = -1;
        try {
            count = adm.getClusterStatus().getServers().size();
            log.debug("Read {} servers from HBase ClusterStatus", count);
        } catch (IOException e) {
            log.debug("Unable to retrieve HBase cluster status", e);
        }
        return count;
    }

    @Override
    public void disableTable(String tableString) throws IOException {
        adm.disableTable(TableName.valueOf(tableString));
    }

    @Override
    public void enableTable(String tableString) throws IOException {
        adm.enableTable(TableName.valueOf(tableString));
    }

    @Override
    public boolean isTableDisabled(String tableString) throws IOException {
        return adm.isTableDisabled(TableName.valueOf(tableString));
    }

    @Override
    public void addColumn(String tableString, HColumnDescriptor columnDescriptor) throws IOException {
        adm.addColumn(TableName.valueOf(tableString), columnDescriptor);
    }

    @Override
    public void close() throws IOException {
        adm.close();
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.Delete;
/**
 * Version-compatibility facade for the handful of HBase client operations
 * whose API has shifted between release lines.  One implementation exists
 * per supported HBase version (0.98, 1.0, 1.1).
 */
public interface HBaseCompat {

    /**
     * Configure the compression scheme {@code algo} on a column family
     * descriptor {@code cd}.  {@code algo} is the string name of a value of
     * HBase's Compression enum, which has moved between packages as HBase
     * evolved — hence a String here rather than the enum itself.
     *
     * @param cd   column family to configure
     * @param algo compression type to use
     */
    void setCompression(HColumnDescriptor cd, String algo);

    /**
     * Create and return a HTableDescriptor instance with the given name.
     * The old HTableDescriptor(String)/byte[] constructors are deprecated in
     * favor of HTableDescriptor(TableName), and TableName only exists in
     * newer HBase versions — hence this factory method.
     *
     * @param tableName HBase table name
     * @return a new table descriptor instance
     */
    HTableDescriptor newTableDescriptor(String tableName);

    /** Open a version-appropriate cluster connection for the given configuration. */
    ConnectionMask createConnection(Configuration conf) throws IOException;

    /** Attach a column family descriptor to a table descriptor. */
    void addColumnFamilyToTableDescriptor(HTableDescriptor tdesc, HColumnDescriptor cdesc);

    /** Set the timestamp on a Delete mutation. */
    void setTimestamp(Delete d, long timestamp);
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.io.compress.Compression;
/**
 * {@link HBaseCompat} implementation for HBase 0.98: connections are created
 * through the (now removed) HConnectionManager.
 */
public class HBaseCompat0_98 implements HBaseCompat {

    @Override
    public void setCompression(HColumnDescriptor cd, String algo) {
        cd.setCompressionType(Compression.Algorithm.valueOf(algo));
    }

    @Override
    public HTableDescriptor newTableDescriptor(String tableName) {
        // Use the TableName-based constructor; the String one is deprecated.
        return new HTableDescriptor(TableName.valueOf(tableName));
    }

    @Override
    public ConnectionMask createConnection(Configuration conf) throws IOException {
        return new HConnection0_98(HConnectionManager.createConnection(conf));
    }

    @Override
    public void addColumnFamilyToTableDescriptor(HTableDescriptor tdesc, HColumnDescriptor cdesc) {
        tdesc.addFamily(cdesc);
    }

    @Override
    public void setTimestamp(Delete d, long timestamp) {
        d.setTimestamp(timestamp);
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.io.compress.Compression;
/**
 * {@link HBaseCompat} implementation for HBase 1.0: connections are created
 * through the ConnectionFactory introduced in the 1.x client API.
 */
public class HBaseCompat1_0 implements HBaseCompat {

    @Override
    public void setCompression(HColumnDescriptor cd, String algo) {
        cd.setCompressionType(Compression.Algorithm.valueOf(algo));
    }

    @Override
    public HTableDescriptor newTableDescriptor(String tableName) {
        // Use the TableName-based constructor; the String one is deprecated.
        return new HTableDescriptor(TableName.valueOf(tableName));
    }

    @Override
    public ConnectionMask createConnection(Configuration conf) throws IOException {
        return new HConnection1_0(ConnectionFactory.createConnection(conf));
    }

    @Override
    public void addColumnFamilyToTableDescriptor(HTableDescriptor tdesc, HColumnDescriptor cdesc) {
        tdesc.addFamily(cdesc);
    }

    @Override
    public void setTimestamp(Delete d, long timestamp) {
        d.setTimestamp(timestamp);
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.io.compress.Compression;
import java.io.IOException;
/**
 * {@link HBaseCompat} implementation for HBase 1.1.  The 1.1 client API is
 * compatible with 1.0 for these operations, so this mirrors HBaseCompat1_0
 * (including reuse of the HConnection1_0 wrapper).
 */
public class HBaseCompat1_1 implements HBaseCompat {

    @Override
    public void setCompression(HColumnDescriptor cd, String algo) {
        cd.setCompressionType(Compression.Algorithm.valueOf(algo));
    }

    @Override
    public HTableDescriptor newTableDescriptor(String tableName) {
        // Use the TableName-based constructor; the String one is deprecated.
        return new HTableDescriptor(TableName.valueOf(tableName));
    }

    @Override
    public ConnectionMask createConnection(Configuration conf) throws IOException {
        return new HConnection1_0(ConnectionFactory.createConnection(conf));
    }

    @Override
    public void addColumnFamilyToTableDescriptor(HTableDescriptor tdesc, HColumnDescriptor cdesc) {
        tdesc.addFamily(cdesc);
    }

    @Override
    public void setTimestamp(Delete d, long timestamp) {
        d.setTimestamp(timestamp);
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.util.Arrays;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Selects and instantiates the {@link HBaseCompat} implementation matching
 * either an explicitly configured class name or the HBase version found on
 * the classpath.  The chosen instance is cached process-wide.
 */
public class HBaseCompatLoader {

    private static final Logger log = LoggerFactory.getLogger(HBaseCompatLoader.class);

    private static final String DEFAULT_HBASE_COMPAT_VERSION = "1.1";

    private static final String DEFAULT_HBASE_CLASS_NAME = "com.thinkaurelius.titan.diskstorage.hbase.HBaseCompat1_1";

    // Process-wide singleton; NOTE(review): once populated, later calls return
    // it regardless of classOverride — callers are expected to pass a
    // consistent override.
    private static HBaseCompat cachedCompat;

    public synchronized static HBaseCompat getCompat(String classOverride) {

        if (null != cachedCompat) {
            log.debug("Returning cached HBase compatibility layer: {}", cachedCompat);
            return cachedCompat;
        }

        String chosenClass = null;
        String chosenSource = null;

        if (null != classOverride) {
            // Explicit configuration wins over version detection.
            chosenClass = classOverride;
            chosenSource = "from explicit configuration";
        } else {
            // Match the runtime HBase version against the supported prefixes.
            String hbaseVersion = VersionInfo.getVersion();
            for (String supportedVersion : Arrays.asList("0.94", "0.96", "0.98", "1.0", "1.1")) {
                if (hbaseVersion.startsWith(supportedVersion + ".")) {
                    chosenClass = "com.thinkaurelius.titan.diskstorage.hbase.HBaseCompat" + supportedVersion.replaceAll("\\.", "_");
                    chosenSource = "supporting runtime HBase version " + hbaseVersion;
                    break;
                }
            }
            if (null == chosenClass) {
                // Unknown version: fall back to the newest supported layer.
                log.info("The HBase version {} is not explicitly supported by Titan. " +
                        "Loading Titan's compatibility layer for its most recent supported HBase version ({})",
                        hbaseVersion, DEFAULT_HBASE_COMPAT_VERSION);
                chosenClass = DEFAULT_HBASE_CLASS_NAME;
                chosenSource = " by default";
            }
        }

        final String errTemplate = " when instantiating HBase compatibility class " + chosenClass;
        HBaseCompat compat;
        try {
            compat = (HBaseCompat) Class.forName(chosenClass).newInstance();
            log.info("Instantiated HBase compatibility layer {}: {}", chosenSource, compat.getClass().getCanonicalName());
        } catch (IllegalAccessException e) {
            throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e);
        } catch (InstantiationException e) {
            throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e);
        } catch (ClassNotFoundException e) {
            throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e);
        }

        return cachedCompat = compat;
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import com.thinkaurelius.titan.diskstorage.BaseTransactionConfig;
import com.thinkaurelius.titan.diskstorage.common.AbstractStoreTransaction;
/**
 * Marker transaction type for the HBase backend.
 *
 * Adds no state or behavior beyond {@link AbstractStoreTransaction}; it
 * exists solely so that HBase store methods can demand an HBase-specific
 * transaction type and reject transactions created by another backend
 * (e.g. Cassandra) at compile/runtime.
 *
 * @author Dan LaRocque &lt;dalaro@hopcount.org&gt;
 */
public class HBaseTransaction extends AbstractStoreTransaction {

    /**
     * @param config transaction configuration forwarded unchanged to the
     *               superclass
     */
    public HBaseTransaction(final BaseTransactionConfig config) {
        super(config);
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
/**
 * {@link ConnectionMask} backed by the HBase 0.98 {@link HConnection} API.
 * Adapts table and admin handles into their version-neutral masks.
 */
public class HConnection0_98 implements ConnectionMask {

    private final HConnection connection;

    /** @param cnx live 0.98 connection; ownership transfers to this mask */
    public HConnection0_98(HConnection cnx) {
        this.connection = cnx;
    }

    /** Opens the named table and wraps it in a 0.98 {@link TableMask}. */
    @Override
    public TableMask getTable(String name) throws IOException {
        return new HTable0_98(connection.getTable(name));
    }

    /** Creates an admin handle bound to this connection. */
    @Override
    public AdminMask getAdmin() throws IOException {
        return new HBaseAdmin0_98(new HBaseAdmin(connection));
    }

    /** Closes the underlying connection. */
    @Override
    public void close() throws IOException {
        connection.close();
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HBaseAdmin;
/**
 * {@link ConnectionMask} backed by the HBase 1.x {@link Connection} API.
 * Adapts table and admin handles into their version-neutral masks.
 */
public class HConnection1_0 implements ConnectionMask {

    private final Connection connection;

    /** @param cnx live 1.x connection; ownership transfers to this mask */
    public HConnection1_0(Connection cnx) {
        this.connection = cnx;
    }

    /** Opens the named table and wraps it in a 1.x {@link TableMask}. */
    @Override
    public TableMask getTable(String name) throws IOException {
        return new HTable1_0(connection.getTable(TableName.valueOf(name)));
    }

    /**
     * Creates an admin handle bound to this connection.
     *
     * NOTE(review): HBase 1.x deprecates the HBaseAdmin(Connection)
     * constructor in favor of Connection.getAdmin(); switching would change
     * the type handed to HBaseAdmin1_0, whose constructor signature is not
     * visible here -- confirm before migrating.
     */
    @Override
    public AdminMask getAdmin() throws IOException {
        return new HBaseAdmin1_0(new HBaseAdmin(connection));
    }

    /** Closes the underlying connection. */
    @Override
    public void close() throws IOException {
        connection.close();
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Scan;
/**
 * {@link TableMask} over the HBase 0.98 {@link HTableInterface}. Unlike the
 * 1.x adapter, write batches must be followed by an explicit flush.
 */
public class HTable0_98 implements TableMask {

    private final HTableInterface delegate;

    /** @param table open 0.98 table handle; ownership transfers to this mask */
    public HTable0_98(HTableInterface table) {
        this.delegate = table;
    }

    /** Runs the given scan and returns its result scanner. */
    @Override
    public ResultScanner getScanner(Scan filter) throws IOException {
        return delegate.getScanner(filter);
    }

    /** Executes a batch of Gets and returns one Result per Get. */
    @Override
    public Result[] get(List<Get> gets) throws IOException {
        return delegate.get(gets);
    }

    /**
     * Applies a batch of mutations, filling {@code results}, then flushes
     * buffered commits -- required on the 0.98 client for the writes to
     * reach the server.
     */
    @Override
    public void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException {
        delegate.batch(writes, results);
        delegate.flushCommits();
    }

    /** Closes the underlying table handle. */
    @Override
    public void close() throws IOException {
        delegate.close();
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
/**
 * {@link TableMask} over the HBase 1.x {@link Table} API.
 */
public class HTable1_0 implements TableMask {

    private final Table delegate;

    /** @param table open 1.x table handle; ownership transfers to this mask */
    public HTable1_0(Table table) {
        this.delegate = table;
    }

    /** Runs the given scan and returns its result scanner. */
    @Override
    public ResultScanner getScanner(Scan filter) throws IOException {
        return delegate.getScanner(filter);
    }

    /** Executes a batch of Gets and returns one Result per Get. */
    @Override
    public Result[] get(List<Get> gets) throws IOException {
        return delegate.get(gets);
    }

    /**
     * Applies a batch of mutations, filling {@code results}. The 1.x
     * {@link Table} API no longer requires (or offers) an explicit
     * flushCommits() after batching, so none is performed here.
     */
    @Override
    public void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException {
        delegate.batch(writes, results);
    }

    /** Closes the underlying table handle. */
    @Override
    public void close() throws IOException {
        delegate.close();
    }
}
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thinkaurelius.titan.diskstorage.hbase;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Scan;
/**
 * This interface hides ABI/API breaking changes that HBase has made to its Table/HTableInterface over the course
 * of development from 0.94 to 1.0 and beyond.
 *
 * Implementations wrap a version-specific table handle; {@link Closeable#close()}
 * releases it.
 */
public interface TableMask extends Closeable
{

    /** Runs the given scan against the wrapped table and returns its scanner. */
    ResultScanner getScanner(Scan filter) throws IOException;

    /** Executes a batch of Gets, returning one Result per requested Get. */
    Result[] get(List<Get> gets) throws IOException;

    /**
     * Applies a batch of mutations; per-operation outcomes are written into
     * {@code results}. Whether an explicit flush follows is version-specific
     * and left to the implementation.
     */
    void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException;

}
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Copyright 2012-2013 Aurelius LLC
* or more contributor license agreements. See the NOTICE file * Licensed under the Apache License, Version 2.0 (the "License");
* distributed with this work for additional information * you may not use this file except in compliance with the License.
* regarding copyright ownership. The ASF licenses this file * You may obtain a copy of the License at
* to you under the Apache License, Version 2.0 (the *
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0 * http://www.apache.org/licenses/LICENSE-2.0
* <p/> *
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package com.thinkaurelius.titan.diskstorage.solr; package com.thinkaurelius.titan.diskstorage.solr;
import com.google.common.base.Joiner; import com.google.common.base.Joiner;
......
...@@ -29,7 +29,10 @@ atlas.graph.storage.directory=target/data/berkley ...@@ -29,7 +29,10 @@ atlas.graph.storage.directory=target/data/berkley
#hbase #hbase
#For standalone mode , specify localhost #For standalone mode , specify localhost
#for distributed mode, specify zookeeper quorum here - For more information refer http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2 #for distributed mode, specify zookeeper quorum here - For more information refer http://s3.thinkaurelius.com/docs/titan/current/hbase.html#_remote_server_mode_2
atlas.graph.storage.hostname=${titan.storage.hostname} atlas.graph.storage.hostname=${titan.storage.hostname}
atlas.graph.storage.hbase.regions-per-server=1
atlas.graph.storage.lock.wait-time=10000
#ElasticSearch #ElasticSearch
atlas.graph.index.search.directory=target/data/es atlas.graph.index.search.directory=target/data/es
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment