Commit da8581e9 by Pierre Padovani, committed by David Radley

ATLAS-2470 - JanusGraph Cassandra support

parent 9368c8a0
@@ -41,6 +41,7 @@
<artifactId>atlas-plugin-classloader</artifactId>
</dependency>
<!-- exclude the dropwizard metrics as it is an older version that conflicts -->
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
@@ -55,7 +56,37 @@
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-graphite</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-ganglia</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Update metrics to the same version used in Atlas to avoid conflicts -->
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
<version>${dropwizard-metrics}</version>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-graphite</artifactId>
<version>${dropwizard-metrics}</version>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-ganglia</artifactId>
<version>${dropwizard-metrics}</version>
</dependency>
</dependencies>
</project>
@@ -86,6 +86,7 @@
</dependency>
<!-- apache storm core dependencies -->
<!-- exclude the dropwizard metrics as it is an older version that conflicts -->
<dependency>
<groupId>org.apache.storm</groupId>
<artifactId>storm-core</artifactId>
@@ -99,9 +100,38 @@
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-graphite</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-ganglia</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Update metrics to the same version used in Atlas to avoid conflicts -->
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
<version>${dropwizard-metrics}</version>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-graphite</artifactId>
<version>${dropwizard-metrics}</version>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-ganglia</artifactId>
<version>${dropwizard-metrics}</version>
</dependency>
<!-- Testing dependencies -->
<dependency>
<groupId>org.testng</groupId>
......
@@ -39,6 +39,12 @@
atlas.graph.storage.hostname=
atlas.graph.storage.hbase.regions-per-server=1
atlas.graph.storage.lock.wait-time=10000
#In order to use Cassandra as a backend, comment out the HBase-specific properties above and uncomment
#the following properties
#atlas.graph.storage.clustername=
#atlas.graph.storage.port=
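#Illustrative values only (not defaults): the cluster name below matches the
#bundled cassandra.yml, and 9160 is Cassandra's default Thrift port
#atlas.graph.storage.clustername=JanusGraph
#atlas.graph.storage.port=9160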
</graph.storage.properties>
<graph.index.backend>solr</graph.index.backend>
<graph.index.properties>#Solr
@@ -184,6 +190,7 @@ atlas.graph.index.search.solr.wait-searcher=true
#atlas.graph.index.search.solr.http-urls=http://localhost:8983/solr
</graph.index.properties>
<cassandra.embedded>false</cassandra.embedded>
<hbase.embedded>true</hbase.embedded>
<solr.embedded>true</solr.embedded>
@@ -249,6 +256,97 @@ atlas.graph.index.search.solr.wait-searcher=true
</plugins>
</build>
</profile>
<!-- profile to package and configure embedded cassandra and solr with the distribution -->
<profile>
<id>embedded-cassandra-solr</id>
<activation>
<activeByDefault>false</activeByDefault>
</activation>
<properties>
<graph.storage.backend>embeddedcassandra</graph.storage.backend>
<entity.repository.properties>atlas.EntityAuditRepository.impl=org.apache.atlas.repository.audit.CassandraBasedAuditRepository</entity.repository.properties>
<graph.storage.properties>#Cassandra
atlas.graph.storage.conf-file=${sys:atlas.home}/conf/cassandra.yml
</graph.storage.properties>
<graph.index.properties>#Solr
#Solr cloud mode properties
atlas.graph.index.search.solr.mode=cloud
atlas.graph.index.search.solr.zookeeper-url=localhost:2181
atlas.graph.index.search.solr.zookeeper-connect-timeout=60000
atlas.graph.index.search.solr.zookeeper-session-timeout=60000
atlas.graph.index.search.solr.wait-searcher=true
#Solr http mode properties
#atlas.graph.index.search.solr.mode=http
#atlas.graph.index.search.solr.http-urls=http://localhost:8983/solr
</graph.index.properties>
<cassandra.embedded>true</cassandra.embedded>
<hbase.embedded>false</hbase.embedded>
<solr.embedded>true</solr.embedded>
<solr.dir>${project.build.directory}/solr</solr.dir>
<solr.tar>http://archive.apache.org/dist/lucene/solr/${solr.version}/solr-${solr.version}.tgz</solr.tar>
<solr.folder>solr-${solr.version}</solr.folder>
<zk.dir>${project.build.directory}/zk</zk.dir>
<zk.tar>http://archive.apache.org/dist/zookeeper/zookeeper-${zookeeper.version}/zookeeper-${zookeeper.version}.tar.gz</zk.tar>
<zk.folder>zookeeper-${zookeeper.version}</zk.folder>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.7</version>
<executions>
<!-- package solr -->
<execution>
<id>solr</id>
<phase>generate-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target name="Download SOLR">
<mkdir dir="${solr.dir}" />
<mkdir dir="${project.basedir}/solr" />
<get src="${solr.tar}" dest="${project.basedir}/solr/${solr.folder}.tgz" usetimestamp="true" verbose="true" skipexisting="true" />
<untar src="${project.basedir}/solr/${solr.folder}.tgz" dest="${project.build.directory}/solr.temp" compression="gzip" />
<copy todir="${solr.dir}">
<fileset dir="${project.build.directory}/solr.temp/${solr.folder}">
<include name="**/*" />
</fileset>
</copy>
</target>
</configuration>
</execution>
<!-- package zookeeper -->
<execution>
<id>zk</id>
<phase>generate-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target name="Download zookeeper">
<mkdir dir="${zk.dir}" />
<mkdir dir="${project.basedir}/zk" />
<get src="${zk.tar}" dest="${project.basedir}/zk/${zk.folder}.tgz" usetimestamp="true" verbose="true" skipexisting="true" />
<untar src="${project.basedir}/zk/${zk.folder}.tgz" dest="${project.build.directory}/zk.temp" compression="gzip" />
<copy todir="${zk.dir}">
<fileset dir="${project.build.directory}/zk.temp/${zk.folder}">
<include name="**/*" />
</fileset>
</copy>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
</profiles>
<build>
......
@@ -46,6 +46,7 @@ ATLAS_HOME = "ATLAS_HOME_DIR"
HBASE_CONF_DIR = "HBASE_CONF_DIR"
MANAGE_LOCAL_HBASE = "MANAGE_LOCAL_HBASE"
MANAGE_LOCAL_SOLR = "MANAGE_LOCAL_SOLR"
MANAGE_EMBEDDED_CASSANDRA = "MANAGE_EMBEDDED_CASSANDRA"
SOLR_BIN = "SOLR_BIN"
SOLR_CONF = "SOLR_CONF"
SOLR_PORT = "SOLR_PORT"
@@ -56,7 +57,8 @@ SOLR_REPLICATION_FACTOR = "SOLR_REPLICATION_FACTOR"
DEFAULT_SOLR_REPLICATION_FACTOR = "1"
ENV_KEYS = ["JAVA_HOME", ATLAS_OPTS, ATLAS_SERVER_OPTS, ATLAS_SERVER_HEAP, ATLAS_LOG, ATLAS_PID, ATLAS_CONF,
"ATLASCPPATH", ATLAS_DATA, ATLAS_HOME, ATLAS_WEBAPP, HBASE_CONF_DIR, SOLR_PORT, MANAGE_LOCAL_HBASE,
MANAGE_LOCAL_SOLR, MANAGE_EMBEDDED_CASSANDRA]
IS_WINDOWS = platform.system() == "Windows"
ON_POSIX = 'posix' in sys.builtin_module_names
CONF_FILE="atlas-application.properties"
@@ -99,6 +101,9 @@ def hbaseBinDir(dir):
def hbaseConfDir(dir):
return os.environ.get(HBASE_CONF_DIR, os.path.join(dir, "hbase", CONF))
def zookeeperBinDir(dir):
# the packaged Zookeeper control scripts live under <atlas_home>/zk/bin
return os.path.join(dir, "zk", BIN)
def solrBinDir(dir):
return os.environ.get(SOLR_BIN, os.path.join(dir, "solr", BIN))
@@ -430,6 +435,12 @@ def is_solr(confdir):
confdir = os.path.join(confdir, CONF_FILE)
return grep(confdir, SOLR_INDEX_CONF_ENTRY) is not None
def is_cassandra_local(configdir):
if os.environ.get(MANAGE_EMBEDDED_CASSANDRA, "False").lower() == 'false':
return False
return True
def is_solr_local(confdir):
if os.environ.get(MANAGE_LOCAL_SOLR, "False").lower() == 'false':
return False
@@ -499,6 +510,16 @@ def wait_for_startup(confdir, wait):
sys.stdout.write('\n')
def run_zookeeper(dir, action, logdir = None, wait=True):
# Run the bundled Zookeeper control script ("start"/"stop") against the
# zoo.cfg packaged under <atlas_home>/conf/zookeeper
zookeeperScript = "zkServer.sh"
if IS_WINDOWS:
zookeeperScript = "zkServer.cmd"
cmd = [os.path.join(dir, zookeeperScript), action, os.path.join(dir, '../../conf/zookeeper/zoo.cfg')]
return runProcess(cmd, logdir, False, wait)
def run_solr(dir, action, zk_url = None, port = None, logdir = None, wait=True):
solrScript = "solr"
@@ -561,6 +582,48 @@ def configure_hbase(dir):
f.close()
os.remove(tmpl_file)
def configure_zookeeper(dir):
conf_dir = os.path.join(dir, CONF, "zookeeper")
zk_conf_file = "zoo.cfg"
tmpl_file = os.path.join(conf_dir, zk_conf_file + ".template")
conf_file = os.path.join(conf_dir, zk_conf_file)
if os.path.exists(tmpl_file):
debug ("Configuring " + tmpl_file + " to " + conf_file)
f = open(tmpl_file,'r')
template = f.read()
f.close()
config = template.replace("${atlas_home}", dir)
f = open(conf_file,'w')
f.write(config)
f.close()
os.remove(tmpl_file)
def configure_cassandra(dir):
conf_dir = os.path.join(dir, CONF)
cassandra_conf_file = "cassandra.yml"
tmpl_file = os.path.join(conf_dir, cassandra_conf_file + ".template")
conf_file = os.path.join(conf_dir, cassandra_conf_file)
if os.path.exists(tmpl_file):
debug ("Configuring " + tmpl_file + " to " + conf_file)
f = open(tmpl_file,'r')
template = f.read()
f.close()
config = template.replace("${atlas_home}", dir)
f = open(conf_file,'w')
f.write(config)
f.close()
os.remove(tmpl_file)
def server_already_running(pid):
print "Atlas server is already running under process %s" % pid
sys.exit()
......
@@ -118,6 +118,14 @@ def main():
#solr setup
if mc.is_solr_local(confdir):
print "configured for local solr."
if mc.is_cassandra_local(confdir):
print "Cassandra embedded configured."
mc.configure_cassandra(atlas_home)
mc.configure_zookeeper(atlas_home)
mc.run_zookeeper(mc.zookeeperBinDir(atlas_home), "start", logdir)
print "zookeeper started."
mc.run_solr(mc.solrBinDir(atlas_home), "start", mc.get_solr_zk_url(confdir), mc.solrPort(), logdir)
print "solr started."
......
@@ -68,8 +68,12 @@ def main():
# stop solr
if mc.is_solr_local(confdir):
mc.run_solr(mc.solrBinDir(atlas_home), "stop", None, mc.solrPort(), None, True)
if mc.is_cassandra_local(confdir):
mc.run_zookeeper(mc.zookeeperBinDir(atlas_home), "stop")
# stop hbase
if mc.is_hbase_local(confdir):
mc.run_hbase_action(mc.hbaseBinDir(atlas_home), "stop", None, None, True)
......
@@ -24,6 +24,16 @@
#atlas.graphdb.backend=org.apache.atlas.repository.graphdb.janus.AtlasJanusGraphDatabase
# Graph Storage
# Set atlas.graph.storage.backend to the correct value for your desired storage
# backend. Possible values:
#
# hbase
# cassandra
# embeddedcassandra - Should only be set by building Atlas with -Pdist,embedded-cassandra-solr
# berkeleyje
#
# See the configuration documentation for more information about configuring the various storage backends.
#
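# Example (illustrative values, not defaults): to use an external Cassandra
# cluster, the backend and host might be set as
#   atlas.graph.storage.backend=cassandra
#   atlas.graph.storage.hostname=localhost
#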
atlas.graph.storage.backend=${graph.storage.backend}
atlas.graph.storage.hbase.table=apache_atlas_janus
@@ -52,10 +62,17 @@ ${graph.storage.properties}
#
# Allowed Values:
# org.apache.atlas.repository.audit.HBaseBasedAuditRepository - log entity changes to hbase
# org.apache.atlas.repository.audit.CassandraBasedAuditRepository - log entity changes to cassandra
# org.apache.atlas.repository.audit.NoopEntityAuditRepository - disable the audit repository
#
${entity.repository.properties}
# If Cassandra is used as the audit backend via the property above, uncomment and set the following
# properties appropriately. If using the embedded Cassandra profile, these properties can remain
# commented out.
# atlas.EntityAuditRepository.keyspace=atlas_audit
# atlas.EntityAuditRepository.replicationFactor=1
# Graph Search Index
atlas.graph.index.search.backend=${graph.index.backend}
......
@@ -60,3 +60,6 @@ export MANAGE_LOCAL_HBASE=${hbase.embedded}
# indicates whether or not a local instance of Solr should be started for Atlas
export MANAGE_LOCAL_SOLR=${solr.embedded}
# indicates whether or not cassandra is the embedded backend for Atlas
export MANAGE_EMBEDDED_CASSANDRA=${cassandra.embedded}
\ No newline at end of file
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Cassandra storage config YAML
# Used when JanusGraph runs with Cassandra embedded inside its own JVM
#
# NOTE: IT IS NOT RECOMMENDED TO USE EMBEDDED CASSANDRA IN A PRODUCTION
# DEPLOYMENT. CASSANDRA SHOULD BE INSTALLED AND RUN SEPARATELY.
#
# NOTE:
# See http://wiki.apache.org/cassandra/StorageConfiguration for
# full explanations of configuration directives
#
# The name of the cluster. This is mainly used to prevent machines in
# one logical cluster from joining another.
cluster_name: 'JanusGraph'
# This defines the number of tokens randomly assigned to this node on the ring
# The more tokens, relative to other nodes, the larger the proportion of data
# that this node will store. You probably want all nodes to have the same number
# of tokens assuming they have equal hardware capability.
#
# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
# and will use the initial_token as described below.
#
# Specifying initial_token will override this setting.
#
# If you already have a cluster with 1 token per node, and wish to migrate to
# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
num_tokens: 256
# initial_token allows you to specify tokens manually. While you can use it with
# vnodes (num_tokens > 1, above) -- in which case you should provide a
# comma-separated list -- it's primarily used when adding nodes to legacy clusters
# that do not have vnodes enabled.
# initial_token:
# May either be "true" or "false" to enable globally, or contain a list
# of data centers to enable per-datacenter.
# hinted_handoff_enabled: DC1,DC2
# See http://wiki.apache.org/cassandra/HintedHandoff
hinted_handoff_enabled: true
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, new hints for it will not be
# created until it has been seen alive and gone down again.
max_hint_window_in_ms: 10800000 # 3 hours
# Maximum throttle in KBs per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# since we expect two nodes to be delivering hints simultaneously.)
hinted_handoff_throttle_in_kb: 1024
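# (Worked example of the rule above: in a three-node cluster each delivery
# thread throttles to 1024 / 2 = 512 KB/s, since two nodes are expected to
# be delivering hints simultaneously.)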
# Number of threads with which to deliver hints;
# Consider increasing this number when you have multi-dc deployments, since
# cross-dc handoff tends to be slower
max_hints_delivery_threads: 2
# Maximum throttle in KBs per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
batchlog_replay_throttle_in_kb: 1024
# Authentication backend, implementing IAuthenticator; used to identify users
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
# PasswordAuthenticator}.
#
# - AllowAllAuthenticator performs no checks - set it to disable authentication.
# - PasswordAuthenticator relies on username/password pairs to authenticate
# users. It keeps usernames and hashed passwords in system_auth.credentials table.
# Please increase system_auth keyspace replication factor if you use this authenticator.
authenticator: AllowAllAuthenticator
# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
# CassandraAuthorizer}.
#
# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
# increase system_auth keyspace replication factor if you use this authorizer.
authorizer: AllowAllAuthorizer
# Validity period for permissions cache (fetching permissions can be an
# expensive operation depending on the authorizer, CassandraAuthorizer is
# one example). Defaults to 2000, set to 0 to disable.
# Will be disabled automatically for AllowAllAuthorizer.
permissions_validity_in_ms: 2000
# The partitioner is responsible for distributing rows (by key) across
# nodes in the cluster. Any IPartitioner may be used, including your
# own as long as it is on the classpath. Out of the box, Cassandra
# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
#
# - RandomPartitioner distributes rows across the cluster evenly by md5.
# This is the default prior to 1.2 and is retained for compatibility.
# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128
# Hash Function instead of md5. When in doubt, this is the best option.
# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
# scanning rows in key order, but the ordering can generate hot spots
# for sequential insertion workloads.
# - OrderPreservingPartitioner is an obsolete form of BOP that stores
#   keys in a less-efficient format and only works with keys that are
#   UTF8-encoded Strings.
# - CollatingOPP collates according to EN,US rules rather than lexical byte
# ordering. Use this as an example if you need custom collation.
#
# See http://wiki.apache.org/cassandra/Operations for more on
# partitioners and token selection.
partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# Directories where Cassandra should store data on disk. Cassandra
# will spread data evenly across them, subject to the granularity of
# the configured compaction strategy.
data_file_directories:
- ${atlas_home}/data/cassandra/data
# commit log
commitlog_directory: ${atlas_home}/data/cassandra/commitlog
# policy for data disk failures:
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# can still be inspected via JMX.
# best_effort: stop using the failed disk and respond to requests based on
# remaining available sstables. This means you WILL see obsolete
# data at CL.ONE!
# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
disk_failure_policy: stop
# policy for commit disk failures:
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# can still be inspected via JMX.
# stop_commit: shutdown the commit log, letting writes collect but
# continuing to service reads, as in pre-2.0.5 Cassandra
# ignore: ignore fatal errors and let the batches fail
commit_failure_policy: stop
# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
# minimum, sometimes more. The key cache is fairly tiny for the amount of
# time it saves, so it's worthwhile to use it at large numbers.
# The row cache saves even more time, but must contain the entire row,
# so it is extremely space-intensive. It's best to only use the
# row cache if you have hot rows or static rows.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
key_cache_size_in_mb:
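# (Worked example: with a 1024MB heap, "auto" resolves to
# min(5% of 1024MB, 100MB) = min(~51MB, 100MB) = ~51MB of key cache.)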
# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 14400 or 4 hours.
key_cache_save_period: 14400
# Number of keys from the key cache to save
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100
# Maximum size of the row cache in memory.
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 0
# Duration in seconds after which Cassandra should
# save the row cache. Caches are saved to saved_caches_directory as specified
# in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0
# Number of keys from the row cache to save
# Disabled by default, meaning all keys are going to be saved
# row_cache_keys_to_save: 100
# The off-heap memory allocator. Affects storage engine metadata as
# well as caches. Experiments show that JEMalloc saves some memory
# compared to the native GCC allocator (i.e., JEMalloc is more
# fragmentation-resistant).
#
# Supported values are: NativeAllocator, JEMallocAllocator
#
# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
# modify cassandra-env.sh as directed in the file.
#
# Defaults to NativeAllocator
# memory_allocator: NativeAllocator
# saved caches
saved_caches_directory: ${atlas_home}/data/cassandra/saved_caches
# commitlog_sync may be either "periodic" or "batch."
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait up to
# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
# performing the sync.
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 50
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds. By default this allows 1024*(CPU cores) pending
# entries on the commitlog queue. If you are writing very large blobs,
# you should reduce that; 16*cores works reasonably well for 1MB blobs.
# It should be at least as large as the concurrent_writes setting.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
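# (Worked example of the sizing note above: on a 4-core machine the default
# queue allows 1024 * 4 = 4096 pending entries; for 1MB blobs,
# 16 * 4 = 64 would be a more suitable bound.)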
# commitlog_periodic_queue_size:
# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
commitlog_segment_size_in_mb: 32
# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
# Addresses of hosts that are deemed contact points.
# Cassandra nodes use this list of hosts to find each other and learn
# the topology of the ring. You must change this if you are running
# multiple nodes!
- class_name: org.apache.cassandra.locator.SimpleSeedProvider
parameters:
# seeds is actually a comma-delimited list of addresses.
# Ex: "<ip1>,<ip2>,<ip3>"
- seeds: "127.0.0.1"
# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
concurrent_reads: 32
concurrent_writes: 32
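# (Worked example of the rules of thumb above: the defaults correspond to
# 2 drives for reads (16 * 2 = 32) and 4 cores for writes (8 * 4 = 32).)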
# Total memory to use for sstable-reading buffers. Defaults to
# the smaller of 1/4 of heap or 512MB.
# file_cache_size_in_mb: 512
# Total memory to use for memtables. Cassandra will flush the largest
# memtable when this much memory is used.
# If omitted, Cassandra will set it to 1/4 of the heap.
# memtable_total_space_in_mb: 2048
# Total space to use for commitlogs. Since commitlog segments are
# mmapped, and hence use up address space, the default size is 32
# on 32-bit JVMs, and 1024 on 64-bit JVMs.
#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
# segment and remove it. So a small total commitlog space will tend
# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 4096
# This sets the amount of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked. If you have a large heap and many data directories,
# you can increase this value for better flush performance.
# By default this will be set to the amount of data directories defined.
#memtable_flush_writers: 1
# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
# impacting read latencies. Almost always a good idea on SSDs; not
# necessarily on platters.
trickle_fsync: false
trickle_fsync_interval_in_kb: 10240
# TCP port, for commands and data
storage_port: 7000
# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
ssl_storage_port: 7001
# Address to bind to and tell other Cassandra nodes to connect to. You
# _must_ change this if you want multiple nodes to be able to
# communicate!
#
# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
# will always do the Right Thing _if_ the node is properly configured
# (hostname, name resolution, etc), and the Right Thing is to use the
# address associated with the hostname (it might not be).
#
# Setting this to 0.0.0.0 is always wrong.
listen_address: localhost
# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4
# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
# Whether to start the native transport server.
# Please note that the address on which the native transport is bound is the
# same as the rpc_address. The port however is different and specified below.
start_native_transport: true
# port for the CQL native transport to listen for clients on
native_transport_port: 9042
# The maximum threads for handling requests when the native transport is used.
# This is similar to rpc_max_threads though the default differs slightly (and
# there is no native_transport_min_threads, idle threads will always be stopped
# after 30 seconds).
# native_transport_max_threads: 128
#
# The maximum size of allowed frame. Frame (requests) larger than this will
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256
# Whether to start the thrift rpc server.
start_rpc: true
# The address to bind the Thrift RPC service and native transport
# server -- clients connect here.
#
# Leaving this blank has the same effect it does for ListenAddress,
# (i.e. it will be based on the configured hostname of the node).
#
# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
# here if you want to listen on all interfaces, but that will break clients
# that rely on node auto-discovery.
rpc_address: localhost
# port for Thrift to listen for clients on
rpc_port: 9160
# enable or disable keepalive on rpc connections
rpc_keepalive: true
# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync -> One thread per thrift connection. For a very large number of clients, memory
# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
# per thread, and that will correspond to your use of virtual memory (but physical memory
# may be limited depending on use of stack space).
#
# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
# asynchronously using a small number of threads that does not vary with the amount
# of thrift clients (and thus scales well to many clients). The rpc requests are still
# synchronous (one thread per active request).
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, can provide your own RPC server by providing the fully-qualified class name
# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync
# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the number of maximum requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:
# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it it is defined by net.ipv4.tcp_wmem
# See:
# /proc/sys/net/core/wmem_max
# /proc/sys/net/core/rmem_max
# /proc/sys/net/ipv4/tcp_wmem
# /proc/sys/net/ipv4/tcp_wmem
# and: man tcp
# internode_send_buff_size_in_bytes:
# internode_recv_buff_size_in_bytes:
# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15
# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: false
# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
# snapshots for you. Mostly useful if you're paranoid when there
# is a data format change.
snapshot_before_compaction: false
# Whether or not a snapshot is taken of the data before keyspace truncation
# or dropping of column families. The STRONGLY advised default of true
# should be used to provide data safety. If you set this flag to false, you will
# lose data on truncation or drop.
auto_snapshot: true
# When executing a scan, within or across a partition, we need to keep the
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
# using the StorageService mbean.
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000
# Add column indexes to a row after its contents reach this size.
# Increase if your column values are large, or if you have a very large
# number of columns. The competing causes are, Cassandra has to
# deserialize this much of the row to read a single column, so you want
# it to be small - at least if you do many partial-row reads - but all
# the index data is read for each access, so you don't want to generate
# that wastefully either.
column_index_size_in_kb: 64
# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
# workload, by mitigating the tendency of small sstables to accumulate
# during a single long running compactions. The default is usually
# fine and if you experience problems with compaction running too
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
# concurrent_compactors defaults to the number of cores.
# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1
# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
# Setting this to 0 disables throttling. Note that this account for all types
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16
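# (Worked example of the guidance above: inserting at ~1 MB/s suggests a
# compaction throughput of 16-32 MB/s, which matches this default.)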
# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200
# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
# How long the coordinator should wait for seq or index scans to complete
range_request_timeout_in_ms: 10000
# How long the coordinator should wait for writes to complete
write_request_timeout_in_ms: 2000
# How long a coordinator should continue to retry a CAS operation
# that contends with other proposals for the same row
cas_contention_timeout_in_ms: 1000
# How long the coordinator should wait for truncates to complete
# (This can be much longer, because unless auto_snapshot is disabled
# we need to flush first so we can snapshot before removing the data.)
truncate_request_timeout_in_ms: 60000
# The default timeout for other, miscellaneous operations
request_timeout_in_ms: 10000
# Enable operation timeout information exchange between nodes to accurately
# measure request timeouts. If disabled, replicas will assume that requests
# were forwarded to them instantly by the coordinator, which means that
# under overload conditions we will waste that much extra time processing
# already-timed-out requests.
#
# Warning: before enabling this property make sure ntp is installed
# and the times are synchronized between the nodes.
cross_node_timeout: false
# Enable socket timeout for streaming operation.
# When a timeout occurs during streaming, streaming is retried from the start
# of the current file. This _can_ involve re-streaming an important amount of
# data, so you should avoid setting the value too low.
# Default value is 0, which never times out streams.
# streaming_socket_timeout_in_ms: 0
# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8
# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
# - it teaches Cassandra enough about your network topology to route
# requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
# correlated failures. It does this by grouping machines into
# "datacenters" and "racks." Cassandra will do its best not to have
# more than one replica on the same "rack" (which may not actually
# be a physical location)
#
# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
# ARE PLACED.
#
# Out of the box, Cassandra provides
# - SimpleSnitch:
# Treats Strategy order as proximity. This improves cache locality
# when disabling read repair, which can further improve throughput.
# Only appropriate for single-datacenter deployments.
# - PropertyFileSnitch:
# Proximity is determined by rack and data center, which are
# explicitly configured in cassandra-topology.properties.
# - GossipingPropertyFileSnitch
# The rack and datacenter for the local node are defined in
# cassandra-rackdc.properties and propagated to other nodes via gossip. If
# cassandra-topology.properties exists, it is used as a fallback, allowing
# migration from the PropertyFileSnitch.
# - RackInferringSnitch:
# Proximity is determined by rack and data center, which are
# assumed to correspond to the 3rd and 2nd octet of each node's
# IP address, respectively. Unless this happens to match your
# deployment conventions (as it did Facebook's), this is best used
# as an example of writing a custom Snitch class.
# - Ec2Snitch:
# Appropriate for EC2 deployments in a single Region. Loads Region
# and Availability Zone information from the EC2 API. The Region is
# treated as the datacenter, and the Availability Zone as the rack.
# Only private IPs are used, so this will not work across multiple
# Regions.
# - Ec2MultiRegionSnitch:
# Uses public IPs as broadcast_address to allow cross-region
# connectivity. (Thus, you should set seed addresses to the public
# IP as well.) You will need to open the storage_port or
# ssl_storage_port on the public IP firewall. (For intra-Region
# traffic, Cassandra will switch to the private IP after
# establishing a connection.)
#
# You can use a custom Snitch by setting this to the full class name
# of the snitch, which will be assumed to be on your classpath.
endpoint_snitch: SimpleSnitch
# controls how often to perform the more expensive part of host score
# calculation
dynamic_snitch_update_interval_in_ms: 100
# controls how often to reset all host scores, allowing a bad host to
# possibly recover
dynamic_snitch_reset_interval_in_ms: 600000
# if set greater than zero and read_repair_chance is < 1.0, this will allow
# 'pinning' of replicas to hosts in order to increase cache capacity.
# The badness threshold will control how much worse the pinned host has to be
# before the dynamic snitch will prefer other replicas over it. This is
# expressed as a double which represents a percentage. Thus, a value of
# 0.2 means Cassandra would continue to prefer the static snitch values
# until the pinned host was 20% worse than the fastest.
dynamic_snitch_badness_threshold: 0.1
# request_scheduler -- Set this to a class that implements
# RequestScheduler, which will schedule incoming client requests
# according to the specific policy. This is useful for multi-tenancy
# with a single Cassandra cluster.
# NOTE: This is specifically for requests from the client and does
# not affect inter node communication.
# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
# client requests to a node with a separate queue for each
# request_scheduler_id. The scheduler is further customized by
# request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.NoScheduler
# Scheduler Options vary based on the type of scheduler
# NoScheduler - Has no options
# RoundRobin
# - throttle_limit -- The throttle_limit is the number of in-flight
# requests per client. Requests beyond
# that limit are queued up until
# running requests can complete.
# The value of 80 here is twice the number of
# concurrent_reads + concurrent_writes.
# - default_weight -- default_weight is optional and allows for
# overriding the default which is 1.
# - weights -- Weights are optional and will default to 1 or the
# overridden default_weight. The weight translates into how
# many requests are handled during each turn of the
# RoundRobin, based on the scheduler id.
#
# request_scheduler_options:
# throttle_limit: 80
# default_weight: 5
# weights:
# Keyspace1: 1
# Keyspace2: 5
# request_scheduler_id -- An identifier based on which to perform
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace
# Enable or disable inter-node encryption
# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
# suite for authentication, key exchange and encryption of the actual data transfers.
# NOTE: No custom encryption options are enabled at the moment
# The available internode options are : all, none, dc, rack
#
# If set to dc cassandra will encrypt the traffic between the DCs
# If set to rack cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
server_encryption_options:
internode_encryption: none
keystore: conf/.keystore
keystore_password: cassandra
truststore: conf/.truststore
truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
# require_client_auth: false
# enable or disable client/server encryption.
client_encryption_options:
enabled: false
keystore: conf/.keystore
keystore_password: cassandra
# require_client_auth: false
# Set truststore and truststore_password if require_client_auth is true
# truststore: conf/.truststore
# truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
# internode_compression controls whether traffic between nodes is
# compressed.
# can be: all - all traffic is compressed
# dc - traffic between different datacenters is compressed
# none - nothing is compressed.
internode_compression: all
# Enable or disable tcp_nodelay for inter-dc communication.
# Disabling it will result in larger (but fewer) network packets being sent,
# reducing overhead from the TCP protocol itself, at the cost of increasing
# latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE
zookeeper.console.threshold=INFO
zookeeper.log.dir=/CHANGE_ME/logs
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=DEBUG
zookeeper.tracelog.dir=.
zookeeper.tracelog.file=zookeeper_trace.log
#
# ZooKeeper Logging Configuration
#
# Format is "<default threshold> (, <appender>)+"
# DEFAULT: console appender only
log4j.rootLogger=${zookeeper.root.logger}
# Example with rolling log file
#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
# Example with rolling log file and tracing
#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
#
# Log INFO level and above messages to the console
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
#
# Add ROLLINGFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
# Max log file size of 10MB
log4j.appender.ROLLINGFILE.MaxFileSize=10MB
# uncomment the next line to limit number of backup files
#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
#
# Add TRACEFILE to rootLogger to get log file output
# Log DEBUG level and above messages to a log file
log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
log4j.appender.TRACEFILE.Threshold=TRACE
log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
### Notice we are including log4j's NDC here (%x)
log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
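# (Worked example: with tickTime=2000 ms, initLimit=10 gives followers
# 20 seconds for initial sync, and syncLimit=5 allows 10 seconds between a
# request and its acknowledgement.)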
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# for example's sake.
dataDir=${atlas_home}/data/zookeeper/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
@@ -80,6 +80,13 @@
</fileSet>
<fileSet>
<directory>target/zk</directory>
<outputDirectory>zk</outputDirectory>
<fileMode>0755</fileMode>
<directoryMode>0755</directoryMode>
</fileSet>
<fileSet>
<directory>../logs</directory>
<outputDirectory>logs</outputDirectory>
<directoryMode>0777</directoryMode>
......
@@ -34,6 +34,15 @@ mvn clean -DskipTests package -Pdist,embedded-hbase-solr</verbatim>
Using the embedded-hbase-solr profile will configure Atlas so that an HBase instance and a Solr instance will be started and stopped along with the Atlas server by default.
---+++ Packaging Atlas with Embedded Cassandra & Solr
To create an Apache Atlas package that includes embedded Cassandra and Solr, build with the embedded-cassandra-solr profile as shown below:
<verbatim>
mvn clean package -Pdist,embedded-cassandra-solr</verbatim>
Using the embedded-cassandra-solr profile will configure Atlas so that an embedded Cassandra instance and a Solr instance will be started and stopped along with the Atlas server by default.
NOTE: This distribution profile is only intended for single-node development, not for production use.
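
Once the distribution is unpacked, the embedded services are managed through the standard Atlas scripts; a minimal sketch of the lifecycle (paths assume the default distribution layout):
<verbatim>
bin/atlas_start.py   # starts Zookeeper and Solr, then Atlas with Cassandra embedded in its JVM
bin/atlas_stop.py    # stops Atlas, then the locally managed services</verbatim>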
---+++ Apache Atlas Package
The build will create the following files, which are used to install Apache Atlas.
......
@@ -81,6 +81,40 @@
<groupId>org.janusgraph</groupId>
<artifactId>janusgraph-berkeleyje</artifactId>
<version>${janus.version}</version>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.janusgraph</groupId>
<artifactId>janusgraph-cassandra</artifactId>
<version>${janus.version}</version>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>com.codahale.metrics</groupId>
<artifactId>metrics-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.janusgraph</groupId>
<artifactId>janusgraph-cql</artifactId>
<version>${janus.version}</version>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
......
@@ -581,6 +581,7 @@
<antlr4.plugin.version>4.5</antlr4.plugin.version>
<maven-site-plugin.version>3.7</maven-site-plugin.version>
<doxia.version>1.8</doxia.version>
<dropwizard-metrics>3.2.2</dropwizard-metrics>
<PermGen>64m</PermGen>
<MaxPermGen>512m</MaxPermGen>
@@ -671,7 +672,7 @@
<graphdb.backend.impl>org.apache.atlas.repository.graphdb.janus.AtlasJanusGraphDatabase</graphdb.backend.impl>
<graph.index.backend>solr</graph.index.backend>
<tests.solr.embedded>true</tests.solr.embedded>
<distro.exclude.packages>WEB-INF/lib/je-*.jar,WEB-INF/lib/elasticsearch-*.jar,WEB-INF/lib/solr-test-framework-*.jar, WEB-INF/lib/jts-*.jar,WEB-INF/lib/logback-*.jar</distro.exclude.packages>
</properties>
</profile>
@@ -691,7 +692,7 @@
<graphdb.backend.impl>org.apache.atlas.repository.graphdb.janus.AtlasJanusGraphDatabase</graphdb.backend.impl>
<graph.index.backend>solr</graph.index.backend>
<tests.solr.embedded>true</tests.solr.embedded>
<distro.exclude.packages>WEB-INF/lib/je-*.jar,WEB-INF/lib/elasticsearch-*.jar,WEB-INF/lib/solr-test-framework-*.jar, WEB-INF/lib/jts-*.jar,WEB-INF/lib/logback-*.jar</distro.exclude.packages>
</properties>
</profile>
......
@@ -89,6 +89,19 @@
<artifactId>mockito-all</artifactId>
</dependency>
<dependency>
<groupId>org.apache.cassandra</groupId>
<artifactId>cassandra-all</artifactId>
<version>2.1.8</version>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Test dependencies --> <!-- Test dependencies -->
<dependency> <dependency>
...@@ -156,6 +169,39 @@ ...@@ -156,6 +169,39 @@
<artifactId>guice-multibindings</artifactId> <artifactId>guice-multibindings</artifactId>
<version>4.1.0</version> <version>4.1.0</version>
<scope>test</scope> <scope>test</scope>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.datastax.cassandra</groupId>
<artifactId>cassandra-driver-core</artifactId>
<version>3.1.4</version>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.cassandraunit</groupId>
<artifactId>cassandra-unit</artifactId>
<version>2.0.2.2</version>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>ch.qos.logback</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency> </dependency>
</dependencies> </dependencies>
......
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.repository.audit;
import com.google.common.annotations.VisibleForTesting;
import org.apache.atlas.ApplicationProperties;
import org.apache.atlas.AtlasException;
import org.apache.atlas.EntityAuditEvent;
import org.apache.atlas.exception.AtlasBaseException;
import org.apache.atlas.listener.ActiveStateChangeHandler;
import org.apache.atlas.model.audit.EntityAuditEventV2;
import org.apache.atlas.service.Service;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.configuration.Configuration;
import org.apache.hadoop.hbase.util.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This abstract base class should be used when adding support for an audit storage backend.
*/
public abstract class AbstractStorageBasedAuditRepository implements Service, EntityAuditRepository, ActiveStateChangeHandler {
private static final Logger LOG = LoggerFactory.getLogger(AbstractStorageBasedAuditRepository.class);
private static final String AUDIT_REPOSITORY_MAX_SIZE_PROPERTY = "atlas.hbase.client.keyvalue.maxsize";
private static final String AUDIT_EXCLUDE_ATTRIBUTE_PROPERTY = "atlas.audit.hbase.entity";
protected static final String FIELD_SEPARATOR = ":";
private static final long ATLAS_HBASE_KEYVALUE_DEFAULT_SIZE = 1024 * 1024;
protected static Configuration APPLICATION_PROPERTIES = null;
public static final String CONFIG_PREFIX = "atlas.audit";
public static final String CONFIG_PERSIST_ENTITY_DEFINITION = CONFIG_PREFIX + ".persistEntityDefinition";
protected Map<String, List<String>> auditExcludedAttributesCache = new HashMap<>();
protected static boolean persistEntityDefinition;
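// read once at class load; when true, implementations also persist the full entity definition with each audit event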
static {
try {
persistEntityDefinition = ApplicationProperties.get().getBoolean(CONFIG_PERSIST_ENTITY_DEFINITION, false);
} catch (AtlasException e) {
throw new RuntimeException(e);
}
}
@Override
public void instanceIsActive() throws AtlasException {
LOG.info("Reacting to active: No action for now.");
}
@Override
public void instanceIsPassive() {
LOG.info("Reacting to passive: No action for now.");
}
@Override
public int getHandlerOrder() {
return HandlerOrder.AUDIT_REPOSITORY.getOrder();
}
@Override
public void putEventsV1(EntityAuditEvent... events) throws AtlasException {
putEventsV1(Arrays.asList(events));
}
@Override
public void putEventsV2(EntityAuditEventV2... events) throws AtlasBaseException {
putEventsV2(Arrays.asList(events));
}
@Override
public List<Object> listEvents(String entityId, String startKey, short maxResults) throws AtlasBaseException {
List ret = listEventsV2(entityId, startKey, maxResults);
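// if no V2 events exist for the entity, fall back to the V1 audit store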
try {
if (CollectionUtils.isEmpty(ret)) {
ret = listEventsV1(entityId, startKey, maxResults);
}
} catch (AtlasException e) {
throw new AtlasBaseException(e);
}
return ret;
}
@Override
public long repositoryMaxSize() {
long ret;
initApplicationProperties();
if (APPLICATION_PROPERTIES == null) {
ret = ATLAS_HBASE_KEYVALUE_DEFAULT_SIZE;
} else {
ret = APPLICATION_PROPERTIES.getLong(AUDIT_REPOSITORY_MAX_SIZE_PROPERTY, ATLAS_HBASE_KEYVALUE_DEFAULT_SIZE);
}
return ret;
}
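// excluded attributes are read from properties of the form atlas.audit.hbase.entity.<entityType>.attributes.exclude and cached per entity type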
@Override
public List<String> getAuditExcludeAttributes(String entityType) {
List<String> ret = null;
initApplicationProperties();
if (auditExcludedAttributesCache.containsKey(entityType)) {
ret = auditExcludedAttributesCache.get(entityType);
} else if (APPLICATION_PROPERTIES != null) {
String[] excludeAttributes = APPLICATION_PROPERTIES.getStringArray(AUDIT_EXCLUDE_ATTRIBUTE_PROPERTY + "." +
entityType + "." + "attributes.exclude");
if (excludeAttributes != null) {
ret = Arrays.asList(excludeAttributes);
}
auditExcludedAttributesCache.put(entityType, ret);
}
return ret;
}
protected void initApplicationProperties() {
if (APPLICATION_PROPERTIES == null) {
try {
APPLICATION_PROPERTIES = ApplicationProperties.get();
} catch (AtlasException ex) {
// ignore
}
}
}
/**
* Should only be used to initialize application properties for testing.
*
* @param config the configuration to install as the shared application properties
*/
@VisibleForTesting
protected void setApplicationProperties(Configuration config) {
APPLICATION_PROPERTIES = config;
}
protected byte[] getKey(String id, Long ts) {
assert id != null : "entity id can't be null";
assert ts != null : "timestamp can't be null";
String keyStr = id + FIELD_SEPARATOR + ts;
return Bytes.toBytes(keyStr);
}
}
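To make the extension point concrete, here is a minimal, hypothetical in-memory subclass. The class name and its list-backed storage are illustrative only; the sketch assumes the methods left to concrete stores are exactly those overridden by the HBase and Cassandra repositories in this change (the list variants of putEventsV1/putEventsV2, listEventsV1/listEventsV2, and the Service start/stop lifecycle), and startKey paging is omitted for brevity.

package org.apache.atlas.repository.audit;
import org.apache.atlas.AtlasException;
import org.apache.atlas.EntityAuditEvent;
import org.apache.atlas.exception.AtlasBaseException;
import org.apache.atlas.model.audit.EntityAuditEventV2;
import java.util.ArrayList;
import java.util.List;
// Hypothetical example only: a list-backed audit store built on the abstract base class.
public class InMemoryAuditRepository extends AbstractStorageBasedAuditRepository {
    private final List<EntityAuditEvent> v1Events = new ArrayList<>();
    private final List<EntityAuditEventV2> v2Events = new ArrayList<>();

    @Override
    public void putEventsV1(List<EntityAuditEvent> events) throws AtlasException {
        v1Events.addAll(events);
    }

    @Override
    public void putEventsV2(List<EntityAuditEventV2> events) throws AtlasBaseException {
        v2Events.addAll(events);
    }

    @Override
    public List<EntityAuditEvent> listEventsV1(String entityId, String startKey, short maxResults) throws AtlasException {
        List<EntityAuditEvent> ret = new ArrayList<>();
        for (EntityAuditEvent event : v1Events) {
            if (entityId.equals(event.getEntityId()) && ret.size() < maxResults) {
                ret.add(event);
            }
        }
        return ret;
    }

    @Override
    public List<EntityAuditEventV2> listEventsV2(String entityId, String startKey, short maxResults) throws AtlasBaseException {
        List<EntityAuditEventV2> ret = new ArrayList<>();
        for (EntityAuditEventV2 event : v2Events) {
            if (entityId.equals(event.getEntityId()) && ret.size() < maxResults) {
                ret.add(event);
            }
        }
        return ret;
    }

    @Override
    public void start() throws AtlasException { }

    @Override
    public void stop() throws AtlasException { }
}

With only these methods supplied, the inherited listEvents(...) works unchanged, trying the V2 store first and falling back to V1.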
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.repository.audit;
import com.datastax.driver.core.BatchStatement;
import com.datastax.driver.core.BoundStatement;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.PreparedStatement;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.google.common.annotations.VisibleForTesting;
import org.apache.atlas.ApplicationProperties;
import org.apache.atlas.AtlasException;
import org.apache.atlas.EntityAuditEvent;
import org.apache.atlas.annotation.ConditionalOnAtlasProperty;
import org.apache.atlas.exception.AtlasBaseException;
import org.apache.atlas.model.audit.EntityAuditEventV2;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Singleton;
/**
* This class provides Apache Cassandra support as the backend for audit storage.
*/
@Singleton
@Component
@ConditionalOnAtlasProperty(property = "atlas.EntityAuditRepository.impl", isDefault = true)
public class CassandraBasedAuditRepository extends AbstractStorageBasedAuditRepository {
private static final Logger LOG = LoggerFactory.getLogger(CassandraBasedAuditRepository.class);
// Default keyspace to store the audit entries
private static final String DEFAULT_KEYSPACE = "atlas_audit";
// When running in embedded cassandra mode, this is the default cluster name used
private static final String DEFAULT_CLUSTER_NAME = "JanusGraph";
// Default cassandra port
private static final int DEFAULT_PORT = 9042;
private static final int DEFAULT_REPLICATION_FACTOR = 3;
// The environment variable that tells us we are running in embedded mode
public static final String MANAGE_EMBEDDED_CASSANDRA = "MANAGE_EMBEDDED_CASSANDRA";
// Application properties
public static final String CASSANDRA_HOSTNAME_PROPERTY = "atlas.graph.storage.hostname";
public static final String CASSANDRA_CLUSTERNAME_PROPERTY = "atlas.graph.storage.clustername";
public static final String CASSANDRA_PORT_PROPERTY = "atlas.graph.storage.port";
public static final String CASSANDRA_REPLICATION_FACTOR_PROPERTY = "atlas.EntityAuditRepository.replicationFactor";
public static final String CASSANDRA_AUDIT_KEYSPACE_PROPERTY = "atlas.EntityAuditRepository.keyspace";
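// table layout: entityid is the partition key and created is a DESC clustering column, so an entity's events are read back newest-first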
private static final String AUDIT_TABLE_SCHEMA =
"CREATE TABLE audit(entityid text, "
+ "created bigint, "
+ "action text, "
+ "user text, "
+ "detail text, "
+ "entity text, "
+ "PRIMARY KEY (entityid, created)"
+ ") WITH CLUSTERING ORDER BY (created DESC);";
private static final String ENTITYID = "entityid";
private static final String CREATED = "created";
private static final String ACTION = "action";
private static final String USER = "user";
private static final String DETAIL = "detail";
private static final String ENTITY = "entity";
private static final String INSERT_STATEMENT_TEMPLATE = "INSERT INTO audit (entityid,created,action,user,detail,entity) VALUES (?,?,?,?,?,?)";
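// NOTE: both select templates below hard-code LIMIT 10, so a single page returns at most 10 rows; the maxResults argument is not applied here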
private static final String SELECT_STATEMENT_TEMPLATE = "select * from audit where entityid=? order by created desc limit 10;";
private static final String SELECT_DATE_STATEMENT_TEMPLATE = "select * from audit where entityid=? and created<=? order by created desc limit 10;";
private String keyspace;
private int replicationFactor;
private Session cassSession;
private String clusterName;
private int port;
private PreparedStatement insertStatement;
private PreparedStatement selectStatement;
private PreparedStatement selectDateStatement;
@Override
public void putEventsV1(List<EntityAuditEvent> events) throws AtlasException {
BatchStatement batch = new BatchStatement();
// bind per event: PreparedStatement.bind(...) creates a fresh BoundStatement each time,
// whereas reusing one mutable BoundStatement would leave every batch entry with the last-bound values
events.forEach(event -> batch.add(insertStatement.bind(event.getEntityId(), event.getTimestamp(),
event.getAction().toString(), event.getUser(), event.getDetails(),
(persistEntityDefinition ? event.getEntityDefinitionString() : null))));
cassSession.execute(batch);
}
@Override
public void putEventsV2(List<EntityAuditEventV2> events) throws AtlasBaseException {
BatchStatement batch = new BatchStatement();
// as in putEventsV1, bind per event so each batch entry keeps its own values
events.forEach(event -> batch.add(insertStatement.bind(event.getEntityId(), event.getTimestamp(),
event.getAction().toString(), event.getUser(), event.getDetails(),
(persistEntityDefinition ? event.getEntity().toString() : null))));
cassSession.execute(batch);
}
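// startKey is the eventKey of the last row already seen (<entityId>:<timestamp>); only the timestamp part is used to page further back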
private BoundStatement getSelectStatement(String entityId, String startKey) {
BoundStatement stmt;
if (StringUtils.isEmpty(startKey)) {
stmt = new BoundStatement(selectStatement).bind(entityId);
} else {
stmt = new BoundStatement(selectDateStatement).bind(entityId, Long.valueOf(startKey.split(FIELD_SEPARATOR)[1]));
}
return stmt;
}
@Override
public List<EntityAuditEvent> listEventsV1(String entityId, String startKey, short maxResults) throws AtlasException {
if (LOG.isDebugEnabled()) {
LOG.debug("Listing events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, maxResults);
}
ResultSet rs = cassSession.execute(getSelectStatement(entityId, startKey));
List<EntityAuditEvent> entityResults = new ArrayList<>();
for (Row row : rs) {
String rowEntityId = row.getString(ENTITYID);
if (!entityId.equals(rowEntityId)) {
continue;
}
EntityAuditEvent event = new EntityAuditEvent();
event.setEntityId(rowEntityId);
event.setAction(EntityAuditEvent.EntityAuditAction.fromString(row.getString(ACTION)));
event.setDetails(row.getString(DETAIL));
event.setUser(row.getString(USER));
event.setTimestamp(row.getLong(CREATED));
event.setEventKey(rowEntityId + FIELD_SEPARATOR + event.getTimestamp());
if (persistEntityDefinition) {
event.setEntityDefinition(row.getString(ENTITY));
}
entityResults.add(event);
}
return entityResults;
}
@Override
public List<EntityAuditEventV2> listEventsV2(String entityId, String startKey, short maxResults) throws AtlasBaseException {
if (LOG.isDebugEnabled()) {
LOG.debug("Listing events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, maxResults);
}
ResultSet rs = cassSession.execute(getSelectStatement(entityId, startKey));
List<EntityAuditEventV2> entityResults = new ArrayList<>();
for (Row row : rs) {
String rowEntityId = row.getString(ENTITYID);
if (!entityId.equals(rowEntityId)) {
continue;
}
EntityAuditEventV2 event = new EntityAuditEventV2();
event.setEntityId(rowEntityId);
event.setAction(EntityAuditEventV2.EntityAuditAction.fromString(row.getString(ACTION)));
event.setDetails(row.getString(DETAIL));
event.setUser(row.getString(USER));
event.setTimestamp(row.getLong(CREATED));
event.setEventKey(rowEntityId + FIELD_SEPARATOR + event.getTimestamp());
if (persistEntityDefinition) {
event.setEntityDefinition(row.getString(ENTITY));
}
entityResults.add(event);
}
return entityResults;
}
@Override
public void start() throws AtlasException {
initApplicationProperties();
initializeSettings();
startInternal();
}
void initializeSettings() {
keyspace = APPLICATION_PROPERTIES.getString(CASSANDRA_AUDIT_KEYSPACE_PROPERTY, DEFAULT_KEYSPACE);
replicationFactor = APPLICATION_PROPERTIES.getInt(CASSANDRA_REPLICATION_FACTOR_PROPERTY, DEFAULT_REPLICATION_FACTOR);
clusterName = APPLICATION_PROPERTIES.getString(CASSANDRA_CLUSTERNAME_PROPERTY, DEFAULT_CLUSTER_NAME);
port = APPLICATION_PROPERTIES.getInt(CASSANDRA_PORT_PROPERTY, DEFAULT_PORT);
}
@VisibleForTesting
void startInternal() throws AtlasException {
createSession();
}
void createSession() throws AtlasException {
Cluster.Builder cassandraClusterBuilder = Cluster.builder();
String hostname = APPLICATION_PROPERTIES.getString(CASSANDRA_HOSTNAME_PROPERTY, "localhost");
Cluster cluster = cassandraClusterBuilder.addContactPoint(hostname).withClusterName(clusterName).withPort(port).build();
try {
cassSession = cluster.connect();
if (cluster.getMetadata().getKeyspace(keyspace) == null) {
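// first run: create the keyspace with SimpleStrategy and the configured replication factor, then the audit table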
String query = "CREATE KEYSPACE " + keyspace + " WITH replication "
+ "= {'class':'SimpleStrategy', 'replication_factor':" + replicationFactor + "}; ";
cassSession.execute(query);
cassSession.close();
cassSession = cluster.connect(keyspace);
// create the audit table
cassSession.execute(AUDIT_TABLE_SCHEMA);
} else {
cassSession.close();
cassSession = cluster.connect(keyspace);
}
// the session is already connected to the audit keyspace, so the templates use unqualified
// table names and need no placeholder substitution
insertStatement = cassSession.prepare(INSERT_STATEMENT_TEMPLATE);
selectStatement = cassSession.prepare(SELECT_STATEMENT_TEMPLATE);
selectDateStatement = cassSession.prepare(SELECT_DATE_STATEMENT_TEMPLATE);
} catch (Exception e) {
throw new AtlasException(e);
}
}
@Override
public void stop() throws AtlasException {
cassSession.close();
}
}
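For orientation, here is a minimal, hypothetical smoke test of the repository's lifecycle and read/write API, mirroring the wiring used by CassandraAuditRepositoryTest further below. The class name, entity values, and contact point are illustrative, and it assumes a Cassandra instance reachable on localhost:9042; it sits in the same package so it can call the protected setApplicationProperties(...), just as the unit test does. start() connects and lazily creates the keyspace and audit table, so no separate schema step is needed.

package org.apache.atlas.repository.audit;
import org.apache.atlas.model.audit.EntityAuditEventV2;
import org.apache.atlas.model.instance.AtlasEntity;
import org.apache.commons.configuration.MapConfiguration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
// Hypothetical smoke test; property and entity values are illustrative.
public class CassandraAuditRepositorySmokeTest {
    public static void main(String[] args) throws Exception {
        Map<String, Object> props = new HashMap<>();
        props.put(CassandraBasedAuditRepository.CASSANDRA_HOSTNAME_PROPERTY, "localhost");
        props.put(CassandraBasedAuditRepository.CASSANDRA_PORT_PROPERTY, 9042);

        CassandraBasedAuditRepository repository = new CassandraBasedAuditRepository();
        repository.setApplicationProperties(new MapConfiguration(props)); // protected; accessible from the same package
        repository.start(); // connects, creating the keyspace and audit table on first use

        // write one V2 audit event, then read the entity's events back newest-first
        repository.putEventsV2(new EntityAuditEventV2("entity-1", System.currentTimeMillis(), "admin",
                EntityAuditEventV2.EntityAuditAction.ENTITY_CREATE, "create details", new AtlasEntity("hive_table")));
        List<EntityAuditEventV2> events = repository.listEventsV2("entity-1", null, (short) 10);
        System.out.println("audit events for entity-1: " + events.size());

        repository.stop();
    }
}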
...@@ -80,13 +80,11 @@ import java.util.Map; ...@@ -80,13 +80,11 @@ import java.util.Map;
@Singleton @Singleton
@Component @Component
@ConditionalOnAtlasProperty(property = "atlas.EntityAuditRepository.impl", isDefault = true) @ConditionalOnAtlasProperty(property = "atlas.EntityAuditRepository.impl", isDefault = true)
public class HBaseBasedAuditRepository implements Service, EntityAuditRepository, ActiveStateChangeHandler { public class HBaseBasedAuditRepository extends AbstractStorageBasedAuditRepository {
private static final Logger LOG = LoggerFactory.getLogger(HBaseBasedAuditRepository.class); private static final Logger LOG = LoggerFactory.getLogger(HBaseBasedAuditRepository.class);
public static final String CONFIG_PREFIX = "atlas.audit";
public static final String CONFIG_TABLE_NAME = CONFIG_PREFIX + ".hbase.tablename"; public static final String CONFIG_TABLE_NAME = CONFIG_PREFIX + ".hbase.tablename";
public static final String DEFAULT_TABLE_NAME = "ATLAS_ENTITY_AUDIT_EVENTS"; public static final String DEFAULT_TABLE_NAME = "ATLAS_ENTITY_AUDIT_EVENTS";
public static final String CONFIG_PERSIST_ENTITY_DEFINITION = CONFIG_PREFIX + ".persistEntityDefinition";
public static final byte[] COLUMN_FAMILY = Bytes.toBytes("dt"); public static final byte[] COLUMN_FAMILY = Bytes.toBytes("dt");
public static final byte[] COLUMN_ACTION = Bytes.toBytes("a"); public static final byte[] COLUMN_ACTION = Bytes.toBytes("a");
...@@ -94,23 +92,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository ...@@ -94,23 +92,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository
public static final byte[] COLUMN_USER = Bytes.toBytes("u"); public static final byte[] COLUMN_USER = Bytes.toBytes("u");
public static final byte[] COLUMN_DEFINITION = Bytes.toBytes("f"); public static final byte[] COLUMN_DEFINITION = Bytes.toBytes("f");
private static final String AUDIT_REPOSITORY_MAX_SIZE_PROPERTY = "atlas.hbase.client.keyvalue.maxsize";
private static final String AUDIT_EXCLUDE_ATTRIBUTE_PROPERTY = "atlas.audit.hbase.entity";
private static final String FIELD_SEPARATOR = ":";
private static final long ATLAS_HBASE_KEYVALUE_DEFAULT_SIZE = 1024 * 1024;
private static Configuration APPLICATION_PROPERTIES = null;
private static boolean persistEntityDefinition;
private Map<String, List<String>> auditExcludedAttributesCache = new HashMap<>();
static {
try {
persistEntityDefinition = ApplicationProperties.get().getBoolean(CONFIG_PERSIST_ENTITY_DEFINITION, false);
} catch (AtlasException e) {
throw new RuntimeException(e);
}
}
private TableName tableName; private TableName tableName;
private Connection connection; private Connection connection;
...@@ -120,16 +101,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository ...@@ -120,16 +101,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository
* @throws AtlasException * @throws AtlasException
*/ */
@Override @Override
public void putEventsV1(EntityAuditEvent... events) throws AtlasException {
putEventsV1(Arrays.asList(events));
}
/**
* Add events to the event repository
* @param events events to be added
* @throws AtlasException
*/
@Override
public void putEventsV1(List<EntityAuditEvent> events) throws AtlasException { public void putEventsV1(List<EntityAuditEvent> events) throws AtlasException {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Putting {} events", events.size()); LOG.debug("Putting {} events", events.size());
...@@ -159,11 +130,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository ...@@ -159,11 +130,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository
} }
@Override @Override
public void putEventsV2(EntityAuditEventV2... events) throws AtlasBaseException {
putEventsV2(Arrays.asList(events));
}
@Override
public void putEventsV2(List<EntityAuditEventV2> events) throws AtlasBaseException { public void putEventsV2(List<EntityAuditEventV2> events) throws AtlasBaseException {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Putting {} events", events.size()); LOG.debug("Putting {} events", events.size());
...@@ -283,34 +249,12 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository ...@@ -283,34 +249,12 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository
} }
} }
@Override
public List<Object> listEvents(String entityId, String startKey, short maxResults) throws AtlasBaseException {
List ret = listEventsV2(entityId, startKey, maxResults);
try {
if (CollectionUtils.isEmpty(ret)) {
ret = listEventsV1(entityId, startKey, maxResults);
}
} catch (AtlasException e) {
throw new AtlasBaseException(e);
}
return ret;
}
private <T> void addColumn(Put put, byte[] columnName, T columnValue) { private <T> void addColumn(Put put, byte[] columnName, T columnValue) {
if (columnValue != null && !columnValue.toString().isEmpty()) { if (columnValue != null && !columnValue.toString().isEmpty()) {
put.addColumn(COLUMN_FAMILY, columnName, Bytes.toBytes(columnValue.toString())); put.addColumn(COLUMN_FAMILY, columnName, Bytes.toBytes(columnValue.toString()));
} }
} }
private byte[] getKey(String id, Long ts) {
assert id != null : "entity id can't be null";
assert ts != null : "timestamp can't be null";
String keyStr = id + FIELD_SEPARATOR + ts;
return Bytes.toBytes(keyStr);
}
/** /**
* List events for the given entity id in decreasing order of timestamp, from the given startKey. Returns n results * List events for the given entity id in decreasing order of timestamp, from the given startKey. Returns n results
* @param entityId entity id * @param entityId entity id
...@@ -386,52 +330,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository ...@@ -386,52 +330,6 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository
} }
} }
@Override
public long repositoryMaxSize() {
long ret;
initApplicationProperties();
if (APPLICATION_PROPERTIES == null) {
ret = ATLAS_HBASE_KEYVALUE_DEFAULT_SIZE;
} else {
ret = APPLICATION_PROPERTIES.getLong(AUDIT_REPOSITORY_MAX_SIZE_PROPERTY, ATLAS_HBASE_KEYVALUE_DEFAULT_SIZE);
}
return ret;
}
@Override
public List<String> getAuditExcludeAttributes(String entityType) {
List<String> ret = null;
initApplicationProperties();
if (auditExcludedAttributesCache.containsKey(entityType)) {
ret = auditExcludedAttributesCache.get(entityType);
} else if (APPLICATION_PROPERTIES != null) {
String[] excludeAttributes = APPLICATION_PROPERTIES.getStringArray(AUDIT_EXCLUDE_ATTRIBUTE_PROPERTY + "." +
entityType + "." + "attributes.exclude");
if (excludeAttributes != null) {
ret = Arrays.asList(excludeAttributes);
}
auditExcludedAttributesCache.put(entityType, ret);
}
return ret;
}
private void initApplicationProperties() {
if (APPLICATION_PROPERTIES == null) {
try {
APPLICATION_PROPERTIES = ApplicationProperties.get();
} catch (AtlasException ex) {
// ignore
}
}
}
private String getResultString(Result result, byte[] columnName) { private String getResultString(Result result, byte[] columnName) {
byte[] rawValue = result.getValue(COLUMN_FAMILY, columnName); byte[] rawValue = result.getValue(COLUMN_FAMILY, columnName);
if ( rawValue != null) { if ( rawValue != null) {
...@@ -560,13 +458,4 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository ...@@ -560,13 +458,4 @@ public class HBaseBasedAuditRepository implements Service, EntityAuditRepository
createTableIfNotExists(); createTableIfNotExists();
} }
@Override
public void instanceIsPassive() {
LOG.info("Reacting to passive: No action for now.");
}
@Override
public int getHandlerOrder() {
return HandlerOrder.HBASE_AUDIT_REPOSITORY.getOrder();
}
} }
...@@ -20,6 +20,8 @@ package org.apache.atlas.repository.audit; ...@@ -20,6 +20,8 @@ package org.apache.atlas.repository.audit;
import org.apache.atlas.EntityAuditEvent; import org.apache.atlas.EntityAuditEvent;
import org.apache.atlas.TestUtilsV2; import org.apache.atlas.TestUtilsV2;
import org.apache.atlas.model.audit.EntityAuditEventV2;
import org.apache.atlas.model.instance.AtlasEntity;
import org.apache.atlas.v1.model.instance.Referenceable; import org.apache.atlas.v1.model.instance.Referenceable;
import org.testng.annotations.BeforeTest; import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test; import org.testng.annotations.Test;
...@@ -42,7 +44,7 @@ public class AuditRepositoryTestBase { ...@@ -42,7 +44,7 @@ public class AuditRepositoryTestBase {
} }
@Test @Test
public void testAddEvents() throws Exception { public void testAddEventsV1() throws Exception {
EntityAuditEvent event = new EntityAuditEvent(rand(), System.currentTimeMillis(), "u1", EntityAuditEvent event = new EntityAuditEvent(rand(), System.currentTimeMillis(), "u1",
EntityAuditEvent.EntityAuditAction.ENTITY_CREATE, "d1", new Referenceable(rand())); EntityAuditEvent.EntityAuditAction.ENTITY_CREATE, "d1", new Referenceable(rand()));
...@@ -51,11 +53,11 @@ public class AuditRepositoryTestBase { ...@@ -51,11 +53,11 @@ public class AuditRepositoryTestBase {
List<EntityAuditEvent> events = eventRepository.listEventsV1(event.getEntityId(), null, (short) 10); List<EntityAuditEvent> events = eventRepository.listEventsV1(event.getEntityId(), null, (short) 10);
assertEquals(events.size(), 1); assertEquals(events.size(), 1);
assertEventEquals(events.get(0), event); assertEventV1Equals(events.get(0), event);
} }
@Test @Test
public void testListPagination() throws Exception { public void testListPaginationV1() throws Exception {
String id1 = "id1" + rand(); String id1 = "id1" + rand();
String id2 = "id2" + rand(); String id2 = "id2" + rand();
String id3 = "id3" + rand(); String id3 = "id3" + rand();
...@@ -76,24 +78,24 @@ public class AuditRepositoryTestBase { ...@@ -76,24 +78,24 @@ public class AuditRepositoryTestBase {
//Use ts for which there is no event - ts + 2 //Use ts for which there is no event - ts + 2
List<EntityAuditEvent> events = eventRepository.listEventsV1(id2, null, (short) 3); List<EntityAuditEvent> events = eventRepository.listEventsV1(id2, null, (short) 3);
assertEquals(events.size(), 3); assertEquals(events.size(), 3);
assertEventEquals(events.get(0), expectedEvents.get(0)); assertEventV1Equals(events.get(0), expectedEvents.get(0));
assertEventEquals(events.get(1), expectedEvents.get(1)); assertEventV1Equals(events.get(1), expectedEvents.get(1));
assertEventEquals(events.get(2), expectedEvents.get(2)); assertEventV1Equals(events.get(2), expectedEvents.get(2));
//Use last event's timestamp for next list(). Should give only 1 event and shouldn't include events from other id //Use last event's timestamp for next list(). Should give only 1 event and shouldn't include events from other id
events = eventRepository.listEventsV1(id2, events.get(2).getEventKey(), (short) 3); events = eventRepository.listEventsV1(id2, events.get(2).getEventKey(), (short) 3);
assertEquals(events.size(), 1); assertEquals(events.size(), 1);
assertEventEquals(events.get(0), expectedEvents.get(2)); assertEventV1Equals(events.get(0), expectedEvents.get(2));
} }
@Test @Test
public void testInvalidEntityId() throws Exception { public void testInvalidEntityIdV1() throws Exception {
List<EntityAuditEvent> events = eventRepository.listEventsV1(rand(), null, (short) 3); List<EntityAuditEvent> events = eventRepository.listEventsV1(rand(), null, (short) 3);
assertEquals(events.size(), 0); assertEquals(events.size(), 0);
} }
protected void assertEventEquals(EntityAuditEvent actual, EntityAuditEvent expected) { protected void assertEventV1Equals(EntityAuditEvent actual, EntityAuditEvent expected) {
if (expected != null) { if (expected != null) {
assertNotNull(actual); assertNotNull(actual);
} }
...@@ -103,4 +105,69 @@ public class AuditRepositoryTestBase { ...@@ -103,4 +105,69 @@ public class AuditRepositoryTestBase {
assertEquals(actual.getTimestamp(), expected.getTimestamp()); assertEquals(actual.getTimestamp(), expected.getTimestamp());
assertEquals(actual.getDetails(), expected.getDetails()); assertEquals(actual.getDetails(), expected.getDetails());
} }
@Test
public void testAddEventsV2() throws Exception {
EntityAuditEventV2 event = new EntityAuditEventV2(rand(), System.currentTimeMillis(), "u1",
EntityAuditEventV2.EntityAuditAction.ENTITY_CREATE, "d1", new AtlasEntity(rand()));
eventRepository.putEventsV2(event);
List<EntityAuditEventV2> events = eventRepository.listEventsV2(event.getEntityId(), null, (short) 10);
assertEquals(events.size(), 1);
assertEventV2Equals(events.get(0), event);
}
@Test
public void testListPaginationV2() throws Exception {
String id1 = "id1" + rand();
String id2 = "id2" + rand();
String id3 = "id3" + rand();
long ts = System.currentTimeMillis();
AtlasEntity entity = new AtlasEntity(rand());
List<EntityAuditEventV2> expectedEvents = new ArrayList<>(3);
for (int i = 0; i < 3; i++) {
//Add events for both ids
EntityAuditEventV2 event = new EntityAuditEventV2(id2, ts - i, "user" + i, EntityAuditEventV2.EntityAuditAction.ENTITY_UPDATE, "details" + i, entity);
eventRepository.putEventsV2(event);
expectedEvents.add(event);
eventRepository.putEventsV2(new EntityAuditEventV2(id1, ts - i, "user" + i, EntityAuditEventV2.EntityAuditAction.ENTITY_DELETE, "details" + i, entity));
eventRepository.putEventsV2(new EntityAuditEventV2(id3, ts - i, "user" + i, EntityAuditEventV2.EntityAuditAction.ENTITY_CREATE, "details" + i, entity));
}
//Use ts for which there is no event - ts + 2
List<EntityAuditEventV2> events = eventRepository.listEventsV2(id2, null, (short) 3);
assertEquals(events.size(), 3);
assertEventV2Equals(events.get(0), expectedEvents.get(0));
assertEventV2Equals(events.get(1), expectedEvents.get(1));
assertEventV2Equals(events.get(2), expectedEvents.get(2));
//Use last event's timestamp for next list(). Should give only 1 event and shouldn't include events from other id
events = eventRepository.listEventsV2(id2, events.get(2).getEventKey(), (short) 3);
assertEquals(events.size(), 1);
assertEventV2Equals(events.get(0), expectedEvents.get(2));
}
@Test
public void testInvalidEntityIdV2() throws Exception {
List<EntityAuditEventV2> events = eventRepository.listEventsV2(rand(), null, (short) 3);
assertEquals(events.size(), 0);
}
protected void assertEventV2Equals(EntityAuditEventV2 actual, EntityAuditEventV2 expected) {
if (expected != null) {
assertNotNull(actual);
}
assertEquals(actual.getEntityId(), expected.getEntityId());
assertEquals(actual.getAction(), expected.getAction());
assertEquals(actual.getTimestamp(), expected.getTimestamp());
assertEquals(actual.getDetails(), expected.getDetails());
}
} }
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.atlas.repository.audit;
import org.apache.atlas.ApplicationProperties;
import org.apache.atlas.AtlasException;
import org.apache.cassandra.exceptions.ConfigurationException;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.configuration.MapConfiguration;
import org.apache.thrift.transport.TTransportException;
import org.cassandraunit.utils.EmbeddedCassandraServerHelper;
import org.testng.annotations.BeforeClass;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class CassandraAuditRepositoryTest extends AuditRepositoryTestBase {
@BeforeClass
public void setup() throws InterruptedException, TTransportException, ConfigurationException, IOException,
AtlasException {
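// boot an embedded Cassandra instance configured by the bundled cassandra_test.yml (included below)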
EmbeddedCassandraServerHelper.startEmbeddedCassandra("cassandra_test.yml");
eventRepository = new CassandraBasedAuditRepository();
Map<String, Object> props = new HashMap<>();
props.put(CassandraBasedAuditRepository.MANAGE_EMBEDDED_CASSANDRA, Boolean.TRUE);
props.put(CassandraBasedAuditRepository.CASSANDRA_CLUSTERNAME_PROPERTY, "Test Cluster");
props.put(CassandraBasedAuditRepository.CASSANDRA_HOSTNAME_PROPERTY, "localhost");
props.put(CassandraBasedAuditRepository.CASSANDRA_PORT_PROPERTY, 9042);
Configuration atlasConf = new MapConfiguration(props);
((CassandraBasedAuditRepository)eventRepository).setApplicationProperties(atlasConf);
((CassandraBasedAuditRepository)eventRepository).start();
// Pause for a second to ensure that the embedded cluster has started
Thread.sleep(1000);
}
}
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Cassandra storage config YAML
# Used when JanusGraph runs with Cassandra embedded inside its own JVM
# NOTE:
# See http://wiki.apache.org/cassandra/StorageConfiguration for
# full explanations of configuration directives
# /NOTE
# The name of the cluster. This is mainly used to prevent machines in
# one logical cluster from joining another.
cluster_name: 'JanusGraph'
# This defines the number of tokens randomly assigned to this node on the ring
# The more tokens, relative to other nodes, the larger the proportion of data
# that this node will store. You probably want all nodes to have the same number
# of tokens assuming they have equal hardware capability.
#
# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
# and will use the initial_token as described below.
#
# Specifying initial_token will override this setting.
#
# If you already have a cluster with 1 token per node, and wish to migrate to
# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
num_tokens: 256
# initial_token allows you to specify tokens manually. While you can use it with
# vnodes (num_tokens > 1, above) -- in which case you should provide a
# comma-separated list -- it's primarily used when adding nodes to legacy clusters
# that do not have vnodes enabled.
# initial_token:
# May either be "true" or "false" to enable globally, or contain a list
# of data centers to enable per-datacenter.
# hinted_handoff_enabled: DC1,DC2
# See http://wiki.apache.org/cassandra/HintedHandoff
hinted_handoff_enabled: true
# this defines the maximum amount of time a dead host will have hints
# generated. After it has been dead this long, new hints for it will not be
# created until it has been seen alive and gone down again.
max_hint_window_in_ms: 10800000 # 3 hours
# Maximum throttle in KBs per second, per delivery thread. This will be
# reduced proportionally to the number of nodes in the cluster. (If there
# are two nodes in the cluster, each delivery thread will use the maximum
# rate; if there are three, each will throttle to half of the maximum,
# since we expect two nodes to be delivering hints simultaneously.)
hinted_handoff_throttle_in_kb: 1024
# Number of threads with which to deliver hints;
# Consider increasing this number when you have multi-dc deployments, since
# cross-dc handoff tends to be slower
max_hints_delivery_threads: 2
# Maximum throttle in KBs per second, total. This will be
# reduced proportionally to the number of nodes in the cluster.
batchlog_replay_throttle_in_kb: 1024
# Authentication backend, implementing IAuthenticator; used to identify users
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator,
# PasswordAuthenticator}.
#
# - AllowAllAuthenticator performs no checks - set it to disable authentication.
# - PasswordAuthenticator relies on username/password pairs to authenticate
# users. It keeps usernames and hashed passwords in system_auth.credentials table.
# Please increase system_auth keyspace replication factor if you use this authenticator.
authenticator: AllowAllAuthenticator
# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions
# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer,
# CassandraAuthorizer}.
#
# - AllowAllAuthorizer allows any action to any user - set it to disable authorization.
# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please
# increase system_auth keyspace replication factor if you use this authorizer.
authorizer: AllowAllAuthorizer
# Validity period for permissions cache (fetching permissions can be an
# expensive operation depending on the authorizer, CassandraAuthorizer is
# one example). Defaults to 2000, set to 0 to disable.
# Will be disabled automatically for AllowAllAuthorizer.
permissions_validity_in_ms: 2000
# The partitioner is responsible for distributing rows (by key) across
# nodes in the cluster. Any IPartitioner may be used, including your
# own as long as it is on the classpath. Out of the box, Cassandra
# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
#
# - RandomPartitioner distributes rows across the cluster evenly by md5.
# This is the default prior to 1.2 and is retained for compatibility.
# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128
# Hash Function instead of md5. When in doubt, this is the best option.
# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
# scanning rows in key order, but the ordering can generate hot spots
# for sequential insertion workloads.
# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
# - keys in a less-efficient format and only works with keys that are
# UTF8-encoded Strings.
# - CollatingOPP collates according to EN,US rules rather than lexical byte
# ordering. Use this as an example if you need custom collation.
#
# See http://wiki.apache.org/cassandra/Operations for more on
# partitioners and token selection.
partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# Directories where Cassandra should store data on disk. Cassandra
# will spread data evenly across them, subject to the granularity of
# the configured compaction strategy.
data_file_directories:
- target/cassandra/data
# commit log
commitlog_directory: target/cassandra/commitlog
# policy for data disk failures:
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# can still be inspected via JMX.
# best_effort: stop using the failed disk and respond to requests based on
# remaining available sstables. This means you WILL see obsolete
# data at CL.ONE!
# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
disk_failure_policy: stop
# policy for commit disk failures:
# stop: shut down gossip and Thrift, leaving the node effectively dead, but
# can still be inspected via JMX.
# stop_commit: shutdown the commit log, letting writes collect but
# continuing to service reads, as in pre-2.0.5 Cassandra
# ignore: ignore fatal errors and let the batches fail
commit_failure_policy: stop
# Maximum size of the key cache in memory.
#
# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
# minimum, sometimes more. The key cache is fairly tiny for the amount of
# time it saves, so it's worthwhile to use it at large numbers.
# The row cache saves even more time, but must contain the entire row,
# so it is extremely space-intensive. It's best to only use the
# row cache if you have hot rows or static rows.
#
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
key_cache_size_in_mb:
# Duration in seconds after which Cassandra should
# save the key cache. Caches are saved to saved_caches_directory as
# specified in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 14400 or 4 hours.
key_cache_save_period: 14400
# Number of keys from the key cache to save
# Disabled by default, meaning all keys are going to be saved
# key_cache_keys_to_save: 100
# Maximum size of the row cache in memory.
# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
#
# Default value is 0, to disable row caching.
row_cache_size_in_mb: 0
# Duration in seconds after which Cassandra should
# save the row cache. Caches are saved to saved_caches_directory as specified
# in this configuration file.
#
# Saved caches greatly improve cold-start speeds, and saving is relatively cheap in
# terms of I/O for the key cache. Row cache saving is much more expensive and
# has limited use.
#
# Default is 0 to disable saving the row cache.
row_cache_save_period: 0
# Number of keys from the row cache to save
# Disabled by default, meaning all keys are going to be saved
# row_cache_keys_to_save: 100
# The off-heap memory allocator. Affects storage engine metadata as
# well as caches. Experiments show that JEMalloc saves more memory
# than the native GCC allocator (i.e., JEMalloc is more
# fragmentation-resistant).
#
# Supported values are: NativeAllocator, JEMallocAllocator
#
# If you intend to use JEMallocAllocator you have to install JEMalloc as library and
# modify cassandra-env.sh as directed in the file.
#
# Defaults to NativeAllocator
# memory_allocator: NativeAllocator
# saved caches
saved_caches_directory: target/cassandra/saved_caches
# commitlog_sync may be either "periodic" or "batch."
# When in batch mode, Cassandra won't ack writes until the commit log
# has been fsynced to disk. It will wait up to
# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
# performing the sync.
#
# commitlog_sync: batch
# commitlog_sync_batch_window_in_ms: 50
#
# the other option is "periodic" where writes may be acked immediately
# and the CommitLog is simply synced every commitlog_sync_period_in_ms
# milliseconds. By default this allows 1024*(CPU cores) pending
# entries on the commitlog queue. If you are writing very large blobs,
# you should reduce that; 16*cores works reasonably well for 1MB blobs.
# It should be at least as large as the concurrent_writes setting.
commitlog_sync: periodic
commitlog_sync_period_in_ms: 10000
# commitlog_periodic_queue_size:
# The size of the individual commitlog file segments. A commitlog
# segment may be archived, deleted, or recycled once all the data
# in it (potentially from each columnfamily in the system) has been
# flushed to sstables.
#
# The default size is 32, which is almost always fine, but if you are
# archiving commitlog segments (see commitlog_archiving.properties),
# then you probably want a finer granularity of archiving; 8 or 16 MB
# is reasonable.
commitlog_segment_size_in_mb: 32
# any class that implements the SeedProvider interface and has a
# constructor that takes a Map<String, String> of parameters will do.
seed_provider:
# Addresses of hosts that are deemed contact points.
# Cassandra nodes use this list of hosts to find each other and learn
# the topology of the ring. You must change this if you are running
# multiple nodes!
- class_name: org.apache.cassandra.locator.SimpleSeedProvider
parameters:
# seeds is actually a comma-delimited list of addresses.
# Ex: "<ip1>,<ip2>,<ip3>"
- seeds: "127.0.0.1"
# For workloads with more data than can fit in memory, Cassandra's
# bottleneck will be reads that need to fetch data from
# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
# order to allow the operations to enqueue low enough in the stack
# that the OS and drives can reorder them.
#
# On the other hand, since writes are almost never IO bound, the ideal
# number of "concurrent_writes" is dependent on the number of cores in
# your system; (8 * number_of_cores) is a good rule of thumb.
concurrent_reads: 32
concurrent_writes: 32
# Total memory to use for sstable-reading buffers. Defaults to
# the smaller of 1/4 of heap or 512MB.
# file_cache_size_in_mb: 512
# Total memory to use for memtables. Cassandra will flush the largest
# memtable when this much memory is used.
# If omitted, Cassandra will set it to 1/4 of the heap.
# memtable_total_space_in_mb: 2048
# Total space to use for commitlogs. Since commitlog segments are
# mmapped, and hence use up address space, the default size is 32
# on 32-bit JVMs, and 1024 on 64-bit JVMs.
#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Cassandra will flush every dirty CF in the oldest
# segment and remove it. So a small total commitlog space will tend
# to cause more flush activity on less-active columnfamilies.
# commitlog_total_space_in_mb: 4096
# This sets the amount of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
# while blocked. If you have a large heap and many data directories,
# you can increase this value for better flush performance.
# By default this will be set to the amount of data directories defined.
#memtable_flush_writers: 1
# Whether to, when doing sequential writing, fsync() at intervals in
# order to force the operating system to flush the dirty
# buffers. Enable this to avoid sudden dirty buffer flushing from
# impacting read latencies. Almost always a good idea on SSDs; not
# necessarily on platters.
trickle_fsync: false
trickle_fsync_interval_in_kb: 10240
# TCP port, for commands and data
storage_port: 7000
# SSL port, for encrypted communication. Unused unless enabled in
# encryption_options
ssl_storage_port: 7001
# Address to bind to and tell other Cassandra nodes to connect to. You
# _must_ change this if you want multiple nodes to be able to
# communicate!
#
# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
# will always do the Right Thing _if_ the node is properly configured
# (hostname, name resolution, etc), and the Right Thing is to use the
# address associated with the hostname (it might not be).
#
# Setting this to 0.0.0.0 is always wrong.
listen_address: localhost
# Address to broadcast to other Cassandra nodes
# Leaving this blank will set it to the same value as listen_address
# broadcast_address: 1.2.3.4
# Internode authentication backend, implementing IInternodeAuthenticator;
# used to allow/disallow connections from peer nodes.
# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
# Whether to start the native transport server.
# Please note that the address on which the native transport is bound is the
# same as the rpc_address. The port however is different and specified below.
start_native_transport: true
# port for the CQL native transport to listen for clients on
native_transport_port: 9042
# The maximum threads for handling requests when the native transport is used.
# This is similar to rpc_max_threads though the default differs slightly (and
# there is no native_transport_min_threads, idle threads will always be stopped
# after 30 seconds).
# native_transport_max_threads: 128
#
# The maximum size of allowed frame. Frame (requests) larger than this will
# be rejected as invalid. The default is 256MB.
# native_transport_max_frame_size_in_mb: 256
# Whether to start the thrift rpc server.
start_rpc: true
# The address to bind the Thrift RPC service and native transport
# server -- clients connect here.
#
# Leaving this blank has the same effect it does for ListenAddress,
# (i.e. it will be based on the configured hostname of the node).
#
# Note that unlike ListenAddress above, it is allowed to specify 0.0.0.0
# here if you want to listen on all interfaces, but that will break clients
# that rely on node auto-discovery.
rpc_address: localhost
# port for Thrift to listen for clients on
rpc_port: 9160
# enable or disable keepalive on rpc connections
rpc_keepalive: true
# Cassandra provides two out-of-the-box options for the RPC Server:
#
# sync -> One thread per thrift connection. For a very large number of clients, memory
# will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
# per thread, and that will correspond to your use of virtual memory (but physical memory
# may be limited depending on use of stack space).
#
# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
# asynchronously using a small number of threads that does not vary with the amount
# of thrift clients (and thus scales well to many clients). The rpc requests are still
# synchronous (one thread per active request).
#
# The default is sync because on Windows hsha is about 30% slower. On Linux,
# sync/hsha performance is about the same, with hsha of course using less memory.
#
# Alternatively, can provide your own RPC server by providing the fully-qualified class name
# of an o.a.c.t.TServerFactory that can create an instance of it.
rpc_server_type: sync
# Uncomment rpc_min|max_thread to set request pool size limits.
#
# Regardless of your choice of RPC server (see above), the number of maximum requests in the
# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
# RPC server, it also dictates the number of clients that can be connected at all).
#
# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
#
# rpc_min_threads: 16
# rpc_max_threads: 2048
# uncomment to set socket buffer sizes on rpc connections
# rpc_send_buff_size_in_bytes:
# rpc_recv_buff_size_in_bytes:
# Uncomment to set socket buffer size for internode communication
# Note that when setting this, the buffer size is limited by net.core.wmem_max
# and when not setting it it is defined by net.ipv4.tcp_wmem
# See:
# /proc/sys/net/core/wmem_max
# /proc/sys/net/core/rmem_max
# /proc/sys/net/ipv4/tcp_wmem
# /proc/sys/net/ipv4/tcp_rmem
# and: man tcp
# internode_send_buff_size_in_bytes:
# internode_recv_buff_size_in_bytes:
# Frame size for thrift (maximum message length).
thrift_framed_transport_size_in_mb: 15
# Set to true to have Cassandra create a hard link to each sstable
# flushed or streamed locally in a backups/ subdirectory of the
# keyspace data. Removing these links is the operator's
# responsibility.
incremental_backups: false
# Whether or not to take a snapshot before each compaction. Be
# careful using this option, since Cassandra won't clean up the
# snapshots for you. Mostly useful if you're paranoid when there
# is a data format change.
snapshot_before_compaction: false
# Whether or not a snapshot is taken of the data before keyspace truncation
# or dropping of column families. The STRONGLY advised default of true
# should be used to provide data safety. If you set this flag to false, you will
# lose data on truncation or drop.
auto_snapshot: true
# When executing a scan, within or across a partition, we need to keep the
# tombstones seen in memory so we can return them to the coordinator, which
# will use them to make sure other replicas also know about the deleted rows.
# With workloads that generate a lot of tombstones, this can cause performance
# problems and even exhaust the server heap.
# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
# Adjust the thresholds here if you understand the dangers and want to
# scan more tombstones anyway. These thresholds may also be adjusted at runtime
# using the StorageService mbean.
tombstone_warn_threshold: 1000
tombstone_failure_threshold: 100000
# Add column indexes to a row after its contents reach this size.
# Increase if your column values are large, or if you have a very large
# number of columns. The competing causes are: Cassandra has to
# deserialize this much of the row to read a single column, so you want
# it to be small - at least if you do many partial-row reads - but all
# the index data is read for each access, so you don't want to generate
# that wastefully either.
column_index_size_in_kb: 64
# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
# workload, by mitigating the tendency of small sstables to accumulate
# during a single long-running compaction. The default is usually
# fine and if you experience problems with compaction running too
# slowly or too fast, you should look at
# compaction_throughput_mb_per_sec first.
#
# concurrent_compactors defaults to the number of cores.
# Uncomment to make compaction mono-threaded, the pre-0.8 default.
#concurrent_compactors: 1
# Throttles compaction to the given total throughput across the entire
# system. The faster you insert data, the faster you need to compact in
# order to keep the sstable count down, but in general, setting this to
# 16 to 32 times the rate you are inserting data is more than sufficient.
# Setting this to 0 disables throttling. Note that this accounts for all types
# of compaction, including validation compaction.
compaction_throughput_mb_per_sec: 16
# Throttles all outbound streaming file transfers on this node to the
# given total throughput in Mbps. This is necessary because Cassandra does
# mostly sequential IO when streaming data during bootstrap or repair, which
# can lead to saturating the network connection and degrading rpc performance.
# When unset, the default is 200 Mbps or 25 MB/s.
# stream_throughput_outbound_megabits_per_sec: 200
# How long the coordinator should wait for read operations to complete
read_request_timeout_in_ms: 5000
# How long the coordinator should wait for seq or index scans to complete
range_request_timeout_in_ms: 10000
# How long the coordinator should wait for writes to complete
write_request_timeout_in_ms: 2000
# How long a coordinator should continue to retry a CAS operation
# that contends with other proposals for the same row
cas_contention_timeout_in_ms: 1000
# How long the coordinator should wait for truncates to complete
# (This can be much longer, because unless auto_snapshot is disabled
# we need to flush first so we can snapshot before removing the data.)
truncate_request_timeout_in_ms: 60000
# The default timeout for other, miscellaneous operations
request_timeout_in_ms: 10000
# Enable operation timeout information exchange between nodes to accurately
# measure request timeouts. If disabled, replicas will assume that requests
# were forwarded to them instantly by the coordinator, which means that
# under overload conditions we will waste that much extra time processing
# already-timed-out requests.
#
# Warning: before enabling this property make sure ntp is installed
# and the times are synchronized between the nodes.
cross_node_timeout: false
# Enable socket timeout for streaming operation.
# When a timeout occurs during streaming, streaming is retried from the start
# of the current file. This _can_ involve re-streaming an important amount of
# data, so you should avoid setting the value too low.
# Default value is 0, which never times out streams.
# streaming_socket_timeout_in_ms: 0
# phi value that must be reached for a host to be marked down.
# most users should never need to adjust this.
# phi_convict_threshold: 8
# endpoint_snitch -- Set this to a class that implements
# IEndpointSnitch. The snitch has two functions:
# - it teaches Cassandra enough about your network topology to route
# requests efficiently
# - it allows Cassandra to spread replicas around your cluster to avoid
# correlated failures. It does this by grouping machines into
# "datacenters" and "racks." Cassandra will do its best not to have
# more than one replica on the same "rack" (which may not actually
# be a physical location)
#
# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
# ARE PLACED.
#
# Out of the box, Cassandra provides
# - SimpleSnitch:
# Treats Strategy order as proximity. This improves cache locality
# when disabling read repair, which can further improve throughput.
# Only appropriate for single-datacenter deployments.
# - PropertyFileSnitch:
# Proximity is determined by rack and data center, which are
# explicitly configured in cassandra-topology.properties.
# - GossipingPropertyFileSnitch:
# The rack and datacenter for the local node are defined in
# cassandra-rackdc.properties and propagated to other nodes via gossip. If
# cassandra-topology.properties exists, it is used as a fallback, allowing
# migration from the PropertyFileSnitch.
# - RackInferringSnitch:
# Proximity is determined by rack and data center, which are
# assumed to correspond to the 3rd and 2nd octet of each node's
# IP address, respectively. Unless this happens to match your
# deployment conventions (as it did at Facebook), this is best used
# as an example of writing a custom Snitch class.
# - Ec2Snitch:
# Appropriate for EC2 deployments in a single Region. Loads Region
# and Availability Zone information from the EC2 API. The Region is
# treated as the datacenter, and the Availability Zone as the rack.
# Only private IPs are used, so this will not work across multiple
# Regions.
# - Ec2MultiRegionSnitch:
# Uses public IPs as broadcast_address to allow cross-region
# connectivity. (Thus, you should set seed addresses to the public
# IP as well.) You will need to open the storage_port or
# ssl_storage_port on the public IP firewall. (For intra-Region
# traffic, Cassandra will switch to the private IP after
# establishing a connection.)
#
# You can use a custom Snitch by setting this to the full class name
# of the snitch, which will be assumed to be on your classpath.
endpoint_snitch: SimpleSnitch
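#
# Illustrative example: a multi-datacenter deployment would typically swap in
# the gossip-based snitch described above:
# endpoint_snitch: GossipingPropertyFileSnitch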
# controls how often to perform the more expensive part of host score
# calculation
dynamic_snitch_update_interval_in_ms: 100
# controls how often to reset all host scores, allowing a bad host to
# possibly recover
dynamic_snitch_reset_interval_in_ms: 600000
# if set greater than zero and read_repair_chance is < 1.0, this will allow
# 'pinning' of replicas to hosts in order to increase cache capacity.
# The badness threshold will control how much worse the pinned host has to be
# before the dynamic snitch will prefer other replicas over it. This is
# expressed as a double representing a fraction. Thus, a value of
# 0.2 means Cassandra would continue to prefer the static snitch values
# until the pinned host was 20% worse than the fastest.
dynamic_snitch_badness_threshold: 0.1
# request_scheduler -- Set this to a class that implements
# RequestScheduler, which will schedule incoming client requests
# according to the specific policy. This is useful for multi-tenancy
# with a single Cassandra cluster.
# NOTE: This is specifically for requests from the client and does
# not affect inter node communication.
# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
# client requests to a node with a separate queue for each
# request_scheduler_id. The scheduler is further customized by
# request_scheduler_options as described below.
request_scheduler: org.apache.cassandra.scheduler.NoScheduler
# Scheduler Options vary based on the type of scheduler
# NoScheduler - Has no options
# RoundRobin
# - throttle_limit -- The throttle_limit is the number of in-flight
# requests per client. Requests beyond
# that limit are queued up until
# running requests can complete.
# The value of 80 here is twice the number of
# concurrent_reads + concurrent_writes.
# - default_weight -- default_weight is optional and allows for
# overriding the default which is 1.
# - weights -- Weights are optional and will default to 1 or the
# overridden default_weight. The weight translates into how
# many requests are handled during each turn of the
# RoundRobin, based on the scheduler id.
#
# request_scheduler_options:
# throttle_limit: 80
# default_weight: 5
# weights:
# Keyspace1: 1
# Keyspace2: 5
# request_scheduler_id -- An identifier based on which to perform
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace
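#
# Illustrative example (assumed limits): enabling per-keyspace round-robin
# scheduling combines the three settings above:
# request_scheduler: org.apache.cassandra.scheduler.RoundRobinScheduler
# request_scheduler_id: keyspace
# request_scheduler_options:
#     throttle_limit: 80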
# Enable or disable inter-node encryption
# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the
# cipher suite for authentication, key exchange, and encryption of the
# actual data transfers.
# NOTE: No custom encryption options are enabled at the moment
# The available internode options are: all, none, dc, rack
#
# If set to dc, Cassandra will encrypt the traffic between the DCs.
# If set to rack, Cassandra will encrypt the traffic between the racks.
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
server_encryption_options:
internode_encryption: none
keystore: conf/.keystore
keystore_password: cassandra
truststore: conf/.truststore
truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
# require_client_auth: false
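#
# Illustrative example (assumed paths and passwords): encrypting only
# cross-datacenter traffic with operator-generated stores would look like:
# server_encryption_options:
#     internode_encryption: dc
#     keystore: /etc/cassandra/conf/server.keystore
#     keystore_password: <your keystore password>
#     truststore: /etc/cassandra/conf/server.truststore
#     truststore_password: <your truststore password>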
# Enable or disable client-to-server encryption.
client_encryption_options:
enabled: false
keystore: conf/.keystore
keystore_password: cassandra
# require_client_auth: false
# Set truststore and truststore_password if require_client_auth is true
# truststore: conf/.truststore
# truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
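#
# Illustrative example (assumed path and password): turning on
# client-to-server encryption without client certificate auth:
# client_encryption_options:
#     enabled: true
#     keystore: /etc/cassandra/conf/client.keystore
#     keystore_password: <your keystore password>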
# internode_compression controls whether traffic between nodes is
# compressed.
# can be: all - all traffic is compressed
# dc - traffic between different datacenters is compressed
# none - nothing is compressed.
internode_compression: all
# Enable or disable tcp_nodelay for inter-dc communication.
# Disabling it will result in larger (but fewer) network packets being sent,
# reducing overhead from the TCP protocol itself, at the cost of increasing
# latency if you block for cross-datacenter responses.
inter_dc_tcp_nodelay: false
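#
# Illustrative example: latency-sensitive cross-datacenter requests may
# prefer smaller, more frequent packets:
# inter_dc_tcp_nodelay: true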
...@@ -27,7 +27,7 @@ import org.apache.atlas.AtlasException; ...@@ -27,7 +27,7 @@ import org.apache.atlas.AtlasException;
*/ */
public interface ActiveStateChangeHandler { public interface ActiveStateChangeHandler {
public enum HandlerOrder { public enum HandlerOrder {
HBASE_AUDIT_REPOSITORY(0), AUDIT_REPOSITORY(0),
GRAPH_BACKED_SEARCH_INDEXER(1), GRAPH_BACKED_SEARCH_INDEXER(1),
TYPEDEF_STORE_INITIALIZER(2), TYPEDEF_STORE_INITIALIZER(2),
DEFAULT_METADATA_SERVICE(3), DEFAULT_METADATA_SERVICE(3),
......
...@@ -44,4 +44,8 @@ ...@@ -44,4 +44,8 @@
<int name="connTimeout">${connTimeout:15000}</int> <int name="connTimeout">${connTimeout:15000}</int>
</shardHandlerFactory> </shardHandlerFactory>
<metrics>
</metrics>
</solr> </solr>