dataplatform / atlas / Commits

Commit 919120f6, authored Dec 03, 2015 by Suma Shivaprasad
ATLAS-352 Improve write performance on type and entity creation with Hbase (sumasai)
Parent: 91ad0218
Showing 30 changed files with 686 additions and 144 deletions (+686 / -144)
distro/pom.xml  +10  -0
distro/src/bin/atlas_start.py  +1  -0
distro/src/main/assemblies/standalone-package.xml  +1  -1
distro/src/test/python/scripts/TestMetadata.py  +3  -7
pom.xml  +7  -0
release-log.txt  +1  -0
repository/pom.xml  +5  -46
.../main/java/org/apache/atlas/RepositoryMetadataModule.java  +1  -0
...ache/atlas/repository/graph/GraphRepoMapperScaleTest.java  +23  -37
titan/pom.xml  +106  -0
.../com/thinkaurelius/titan/diskstorage/hbase/AdminMask.java  +0  -0
...thinkaurelius/titan/diskstorage/hbase/ConnectionMask.java  +0  -0
...thinkaurelius/titan/diskstorage/hbase/HBaseAdmin0_98.java  +0  -0
.../thinkaurelius/titan/diskstorage/hbase/HBaseAdmin1_0.java  +0  -0
...om/thinkaurelius/titan/diskstorage/hbase/HBaseCompat.java  +0  -1
...hinkaurelius/titan/diskstorage/hbase/HBaseCompat0_98.java  +0  -0
...thinkaurelius/titan/diskstorage/hbase/HBaseCompat1_0.java  +0  -1
...thinkaurelius/titan/diskstorage/hbase/HBaseCompat1_1.java  +0  -0
...nkaurelius/titan/diskstorage/hbase/HBaseCompatLoader.java  +0  -0
...ius/titan/diskstorage/hbase/HBaseKeyColumnValueStore.java  +31  -2
...nkaurelius/titan/diskstorage/hbase/HBaseStoreManager.java  +26  -25
...inkaurelius/titan/diskstorage/hbase/HBaseTransaction.java  +43  -1
...hinkaurelius/titan/diskstorage/hbase/HConnection0_98.java  +0  -0
...thinkaurelius/titan/diskstorage/hbase/HConnection1_0.java  +0  -0
...com/thinkaurelius/titan/diskstorage/hbase/HTable0_98.java  +0  -0
.../com/thinkaurelius/titan/diskstorage/hbase/HTable1_0.java  +0  -1
.../com/thinkaurelius/titan/diskstorage/hbase/TableMask.java  +0  -0
...aurelius/titan/diskstorage/locking/LocalLockMediator.java  +345  -0
.../com/thinkaurelius/titan/diskstorage/solr/Solr5Index.java  +23  -22
...lius/titan/diskstorage/locking/LocalLockMediatorTest.java  +60  -0
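Taken together, the per-file diffs below do two things: the shaded Titan HBase/Solr storage classes move out of the repository module into a new atlas-titan module, and in-process locking support is added for the HBase store (acquireLock previously threw UnsupportedOperationException), with locks released when the transaction commits or rolls back; see HBaseKeyColumnValueStore, HBaseStoreManager, HBaseTransaction and the new LocalLockMediator below. As a hedged, self-contained sketch of that core idea — SimpleLocalLockTable and its methods are invented here for illustration and are not the Atlas/Titan classes — a JVM-local lock table keyed by (key, column) can arbitrate concurrent writers in the same process without a round trip to HBase:

    import java.util.concurrent.ConcurrentHashMap;

    /** Illustrative only: a JVM-local lock table keyed by "key:column", with expiring entries. */
    public class SimpleLocalLockTable {

        private static final class Holder {
            final Object owner;
            final long expiresAtNanos;
            Holder(Object owner, long expiresAtNanos) {
                this.owner = owner;
                this.expiresAtNanos = expiresAtNanos;
            }
        }

        private final ConcurrentHashMap<String, Holder> locks = new ConcurrentHashMap<>();

        /** Returns true if 'owner' now holds the lock on keyColumn; contention is resolved in-process. */
        public boolean lock(String keyColumn, Object owner, long ttlNanos) {
            long now = System.nanoTime();
            Holder fresh = new Holder(owner, now + ttlNanos);
            Holder existing = locks.putIfAbsent(keyColumn, fresh);
            if (existing == null) {
                return true;                                       // uncontended
            }
            if (existing.owner.equals(owner) || existing.expiresAtNanos < now) {
                return locks.replace(keyColumn, existing, fresh);  // refresh own lock, or take over an expired one
            }
            return false;                                          // someone else holds a live lock
        }

        public void unlock(String keyColumn, Object owner) {
            Holder existing = locks.get(keyColumn);
            if (existing != null && existing.owner.equals(owner)) {
                locks.remove(keyColumn, existing);
            }
        }

        public static void main(String[] args) {
            SimpleLocalLockTable table = new SimpleLocalLockTable();
            Object tx1 = "tx1", tx2 = "tx2";
            System.out.println(table.lock("row1:col1", tx1, 5_000_000_000L)); // true
            System.out.println(table.lock("row1:col1", tx2, 5_000_000_000L)); // false: tx1 holds it
            table.unlock("row1:col1", tx1);
            System.out.println(table.lock("row1:col1", tx2, 5_000_000_000L)); // true
        }
    }

The real mediator additionally carries a namespace, uses Titan's Timepoint/TimestampProvider for expiry, and evicts expired entries with a background cleaner, as the LocalLockMediator.java diff further down shows.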
distro/pom.xml
@@ -89,6 +89,16 @@
     </profiles>

     <build>
+        <outputDirectory>target/bin</outputDirectory>
+        <resources>
+            <resource>
+                <directory>src/bin</directory>
+                <filtering>true</filtering>
+                <includes>
+                    <include>**/*.py</include>
+                </includes>
+            </resource>
+        </resources>
         <plugins>
             <plugin>
                 <groupId>org.codehaus.mojo</groupId>
distro/src/bin/atlas_start.py
@@ -58,6 +58,7 @@ def main():
     p = os.pathsep
     metadata_classpath = confdir + p \
                          + os.path.join(web_app_dir, "atlas", "WEB-INF", "classes") + p \
+                         + os.path.join(web_app_dir, "atlas", "WEB-INF", "lib", "atlas-titan-${project.version}.jar") + p \
                          + os.path.join(web_app_dir, "atlas", "WEB-INF", "lib", "*") + p \
                          + os.path.join(metadata_home, "libext", "*")
     if os.path.exists(hbase_conf_dir):
distro/src/main/assemblies/standalone-package.xml
@@ -48,7 +48,7 @@
         </fileSet>

         <fileSet>
-            <directory>src/bin</directory>
+            <directory>target/bin</directory>
             <outputDirectory>bin</outputDirectory>
             <fileMode>0755</fileMode>
             <directoryMode>0755</directoryMode>
distro/src/test/python/scripts/TestMetadata.py
@@ -56,20 +56,16 @@ class TestMetadata(unittest.TestCase):
       java_mock.assert_called_with(
         'org.apache.atlas.Atlas',
         ['-app', 'metadata_home\\server\\webapp\\atlas'],
-        'metadata_home\\conf;metadata_home\\server\\webapp\\atlas\\WEB-INF\\classes;metadata_home\\server\\webapp\\atlas\\WEB-INF\\lib\\*;metadata_home\\libext\\*;metadata_home\\hbase\\conf',
+        'metadata_home\\conf;metadata_home\\server\\webapp\\atlas\\WEB-INF\\classes;metadata_home\\server\\webapp\\atlas\\WEB-INF\\lib\\atlas-titan-${project.version}.jar;metadata_home\\server\\webapp\\atlas\\WEB-INF\\lib\\*;metadata_home\\libext\\*;metadata_home\\hbase\\conf',
         ['-Datlas.log.dir=metadata_home\\logs', '-Datlas.log.file=application.log', '-Datlas.home=metadata_home', '-Datlas.conf=metadata_home\\conf', '-Xmx1024m', '-XX:MaxPermSize=512m', '-Dlog4j.configuration=atlas-log4j.xml'],
         'metadata_home\\logs')
     else:
       java_mock.assert_called_with(
         'org.apache.atlas.Atlas',
         ['-app', 'metadata_home/server/webapp/atlas'],
-        'metadata_home/conf:metadata_home/server/webapp/atlas/WEB-INF/classes:metadata_home/server/webapp/atlas/WEB-INF/lib/*:metadata_home/libext/*:metadata_home/hbase/conf',
+        'metadata_home/conf:metadata_home/server/webapp/atlas/WEB-INF/classes:metadata_home/server/webapp/atlas/WEB-INF/lib/atlas-titan-${project.version}.jar:metadata_home/server/webapp/atlas/WEB-INF/lib/*:metadata_home/libext/*:metadata_home/hbase/conf',
         ['-Datlas.log.dir=metadata_home/logs', '-Datlas.log.file=application.log', '-Datlas.home=metadata_home', '-Datlas.conf=metadata_home/conf', '-Xmx1024m', '-XX:MaxPermSize=512m', '-Dlog4j.configuration=atlas-log4j.xml'],
         'metadata_home/logs')
     pass

   def test_jar_java_lookups_fail(self):
pom.xml
@@ -409,6 +409,7 @@
         <module>typesystem</module>
         <module>notification</module>
         <module>client</module>
+        <module>titan</module>
         <module>repository</module>
         <module>dashboard</module>
         <module>webapp</module>

@@ -925,6 +926,12 @@
             <dependency>
                 <groupId>org.apache.atlas</groupId>
+                <artifactId>atlas-titan</artifactId>
+                <version>${project.version}</version>
+            </dependency>
+
+            <dependency>
+                <groupId>org.apache.atlas</groupId>
                 <artifactId>atlas-repository</artifactId>
                 <version>${project.version}</version>
             </dependency>
release-log.txt
@@ -9,6 +9,7 @@ ATLAS-54 Rename configs in hive hook (shwethags)
 ATLAS-3 Mixed Index creation fails with Date types (sumasai via shwethags)

 ALL CHANGES:
+ATLAS-352 Improve write performance on type and entity creation with Hbase (sumasai)
 ATLAS-350 Document jaas config details for atlas (tbeerbower via shwethags)
 ATLAS-344 Document HBase permissions for secure cluster (tbeerbower via shwethags)
 ATLAS-335 Kerberized cluster: Atlas fails to come up with hbase as backend (sumasai via shwethags)
repository/pom.xml
@@ -50,6 +50,11 @@
         </dependency>

         <dependency>
+            <groupId>org.apache.atlas</groupId>
+            <artifactId>atlas-titan</artifactId>
+        </dependency>
+
+        <dependency>
             <groupId>joda-time</groupId>
             <artifactId>joda-time</artifactId>
         </dependency>

@@ -85,52 +90,6 @@
         </dependency>

-        <dependency>
-            <groupId>com.thinkaurelius.titan</groupId>
-            <artifactId>titan-core</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.thinkaurelius.titan</groupId>
-            <artifactId>titan-es</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.vividsolutions</groupId>
-            <artifactId>jts</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-core</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>org.apache.solr</groupId>
-            <artifactId>solr-solrj</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.thinkaurelius.titan</groupId>
-            <artifactId>titan-berkeleyje</artifactId>
-        </dependency>
-
-        <!-- Commenting out since titan-hbase classes are shaded for 1.x support -->
-        <!--<dependency>-->
-        <!--<groupId>com.thinkaurelius.titan</groupId>-->
-        <!--<artifactId>titan-hbase</artifactId>-->
-        <!--</dependency>-->
-
-        <dependency>
-            <groupId>org.apache.hbase</groupId>
-            <artifactId>hbase-client</artifactId>
-        </dependency>
-
-        <dependency>
-            <groupId>com.thinkaurelius.titan</groupId>
-            <artifactId>titan-lucene</artifactId>
-        </dependency>
-
         <dependency>
             <groupId>com.tinkerpop.gremlin</groupId>
             <artifactId>gremlin-java</artifactId>
         </dependency>
repository/src/main/java/org/apache/atlas/RepositoryMetadataModule.java
@@ -27,6 +27,7 @@ import org.aopalliance.intercept.MethodInterceptor;
 import org.apache.atlas.discovery.DiscoveryService;
 import org.apache.atlas.discovery.HiveLineageService;
 import org.apache.atlas.discovery.LineageService;
+import org.apache.atlas.discovery.SearchIndexer;
 import org.apache.atlas.discovery.graph.GraphBackedDiscoveryService;
 import org.apache.atlas.listener.TypesChangeListener;
 import org.apache.atlas.repository.MetadataRepository;
repository/src/test/java/org/apache/atlas/repository/graph/GraphRepoMapperScaleTest.java
@@ -18,9 +18,11 @@
 package org.apache.atlas.repository.graph;

+import com.google.inject.Inject;
 import com.thinkaurelius.titan.core.TitanFactory;
 import com.thinkaurelius.titan.core.TitanGraph;
 import com.thinkaurelius.titan.core.TitanIndexQuery;
+import com.thinkaurelius.titan.core.util.TitanCleanup;
 import com.thinkaurelius.titan.diskstorage.BackendException;
 import com.thinkaurelius.titan.diskstorage.configuration.ReadConfiguration;
 import com.thinkaurelius.titan.diskstorage.configuration.backend.CommonsConfiguration;
@@ -30,6 +32,7 @@ import com.tinkerpop.blueprints.GraphQuery;
 import com.tinkerpop.blueprints.Predicate;
 import com.tinkerpop.blueprints.Vertex;
 import org.apache.atlas.GraphTransaction;
+import org.apache.atlas.RepositoryMetadataModule;
 import org.apache.atlas.TestUtils;
 import org.apache.atlas.repository.Constants;
 import org.apache.atlas.typesystem.ITypedReferenceableInstance;
@@ -43,6 +46,7 @@ import org.apache.commons.io.FileUtils;
 import org.testng.Assert;
 import org.testng.annotations.AfterClass;
 import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Guice;
 import org.testng.annotations.Test;

 import java.io.File;
@@ -53,6 +57,7 @@ import java.util.Date;
 import java.util.Random;

 @Test
+@Guice(modules = RepositoryMetadataModule.class)
 public class GraphRepoMapperScaleTest {

     private static final String DATABASE_NAME = "foo";
@@ -61,50 +66,21 @@ public class GraphRepoMapperScaleTest {
     private static final String INDEX_DIR =
             System.getProperty("java.io.tmpdir", "/tmp") + "/atlas-test" + new Random().nextLong();

-    private GraphProvider<TitanGraph> graphProvider = new GraphProvider<TitanGraph>() {
-
-        private TitanGraph graph = null;
-
-        //Ensure separate directory for graph provider to avoid issues with index merging
-        @Override
-        public TitanGraph get() {
-            try {
-                if (graph == null) {
-                    synchronized (GraphRepoMapperScaleTest.class) {
-                        if (graph == null) {
-                            ReadConfiguration config = new CommonsConfiguration() {{
-                                set("storage.backend", "inmemory");
-                                set("index.search.directory", INDEX_DIR);
-                                set("index.search.backend", "elasticsearch");
-                                set("index.search.elasticsearch.local-mode", "true");
-                                set("index.search.elasticsearch.client-only", "false");
-                            }};
-                            GraphDatabaseConfiguration graphconfig = new GraphDatabaseConfiguration(config);
-                            graphconfig.getBackend().clearStorage();
-                            graph = TitanFactory.open(config);
-                        }
-                    }
-                }
-            } catch (BackendException e) {
-                e.printStackTrace();
-            }
-            return graph;
-        }
-    };
+    @Inject
+    GraphProvider<TitanGraph> graphProvider;

+    @Inject
     private GraphBackedMetadataRepository repositoryService;

     private GraphBackedSearchIndexer searchIndexer;

     private TypeSystem typeSystem = TypeSystem.getInstance();

     private String dbGUID;

     @BeforeClass
     @GraphTransaction
     public void setUp() throws Exception {
-        //Make sure we can cleanup the index directory
-        repositoryService = new GraphBackedMetadataRepository(graphProvider);
         searchIndexer = new GraphBackedSearchIndexer(graphProvider);

         Collection<IDataType> typesAdded = TestUtils.createHiveTypes(typeSystem);
         searchIndexer.onAdd(typesAdded);

@@ -112,11 +88,17 @@ public class GraphRepoMapperScaleTest {
     @AfterClass
     public void tearDown() throws Exception {
-        graphProvider.get().shutdown();
+        TypeSystem.getInstance().reset();
         try {
-            FileUtils.deleteDirectory(new File(INDEX_DIR));
+            //TODO - Fix failure during shutdown while using BDB
+            graphProvider.get().shutdown();
-        } catch (IOException ioe) {
-            System.err.println("Failed to cleanup index directory");
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+        try {
+            TitanCleanup.clear(graphProvider.get());
+        } catch (Exception e) {
+            e.printStackTrace();
         }
     }

@@ -142,6 +124,10 @@ public class GraphRepoMapperScaleTest {
     @Test(dependsOnMethods = "testSubmitEntity")
     public void testSearchIndex() throws Exception {
+        //Elasticsearch requires some time before index is updated
+        Thread.sleep(5000);
+
         searchWithOutIndex(Constants.GUID_PROPERTY_KEY, dbGUID);
         searchWithOutIndex(Constants.ENTITY_TYPE_PROPERTY_KEY, "column_type");
         searchWithOutIndex(Constants.ENTITY_TYPE_PROPERTY_KEY, TestUtils.TABLE_TYPE);
titan/pom.xml  (new file, 0 → 100644)

<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements.  See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership.  The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License.  You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>apache-atlas</artifactId>
        <groupId>org.apache.atlas</groupId>
        <version>0.6-incubating-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>atlas-titan</artifactId>
    <description>Apache Atlas Titan Overrides</description>
    <name>Apache Atlas Titan</name>
    <packaging>jar</packaging>

    <dependencies>
        <dependency>
            <groupId>com.thinkaurelius.titan</groupId>
            <artifactId>titan-core</artifactId>
        </dependency>

        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
        </dependency>

        <dependency>
            <groupId>com.vividsolutions</groupId>
            <artifactId>jts</artifactId>
        </dependency>

        <dependency>
            <groupId>org.apache.solr</groupId>
            <artifactId>solr-core</artifactId>
        </dependency>

        <dependency>
            <groupId>org.apache.solr</groupId>
            <artifactId>solr-solrj</artifactId>
        </dependency>

        <dependency>
            <groupId>com.thinkaurelius.titan</groupId>
            <artifactId>titan-es</artifactId>
        </dependency>

        <dependency>
            <groupId>com.thinkaurelius.titan</groupId>
            <artifactId>titan-berkeleyje</artifactId>
        </dependency>

        <dependency>
            <groupId>com.thinkaurelius.titan</groupId>
            <artifactId>titan-lucene</artifactId>
        </dependency>

        <dependency>
            <groupId>org.testng</groupId>
            <artifactId>testng</artifactId>
        </dependency>

        <dependency>
            <groupId>org.mockito</groupId>
            <artifactId>mockito-all</artifactId>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-jar-plugin</artifactId>
                <version>2.4</version>
                <configuration>
                    <excludes>
                        <exclude>**/log4j.xml</exclude>
                    </excludes>
                </configuration>
            </plugin>
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
\ No newline at end of file
repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/AdminMask.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/AdminMask.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/ConnectionMask.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/ConnectionMask.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseAdmin0_98.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseAdmin0_98.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseAdmin1_0.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseAdmin1_0.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat.java
@@ -19,7 +19,6 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Delete;

 public interface HBaseCompat {

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat0_98.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat0_98.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat1_0.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat1_0.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.io.compress.Compression;

 public class HBaseCompat1_0 implements HBaseCompat {

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat1_1.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompat1_1.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompatLoader.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseCompatLoader.java  (file moved)
repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseKeyColumnValueStore.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseKeyColumnValueStore.java
@@ -18,12 +18,19 @@ import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Iterators;
+import com.thinkaurelius.titan.core.attribute.Duration;
 import com.thinkaurelius.titan.diskstorage.*;
 import com.thinkaurelius.titan.diskstorage.keycolumnvalue.*;
+import com.thinkaurelius.titan.diskstorage.locking.LocalLockMediator;
+import com.thinkaurelius.titan.diskstorage.locking.PermanentLockingException;
+import com.thinkaurelius.titan.diskstorage.util.KeyColumn;
 import com.thinkaurelius.titan.diskstorage.util.RecordIterator;
 import com.thinkaurelius.titan.diskstorage.util.StaticArrayBuffer;
 import com.thinkaurelius.titan.diskstorage.util.StaticArrayEntry;
 import com.thinkaurelius.titan.diskstorage.util.StaticArrayEntryList;
+import com.thinkaurelius.titan.diskstorage.util.time.Timepoint;
+import com.thinkaurelius.titan.diskstorage.util.time.Timestamps;
+import com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration;
 import com.thinkaurelius.titan.util.system.IOUtils;
 import org.apache.hadoop.hbase.client.*;
@@ -31,6 +38,7 @@ import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
 import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,6 +47,10 @@ import javax.annotation.Nullable;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.*;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static com.thinkaurelius.titan.diskstorage.EntryMetaData.*;

 /**
  * Here are some areas that might need work:
@@ -71,7 +83,11 @@ public class HBaseKeyColumnValueStore implements KeyColumnValueStore {
     private final ConnectionMask cnx;

-    HBaseKeyColumnValueStore(HBaseStoreManager storeManager, ConnectionMask cnx, String tableName, String columnFamily, String storeName) {
+    private LocalLockMediator<StoreTransaction> localLockMediator;
+    private Duration lockExpiryTime;
+
+    HBaseKeyColumnValueStore(HBaseStoreManager storeManager, ConnectionMask cnx, String tableName,
+                             String columnFamily, String storeName, LocalLockMediator<StoreTransaction> llm) {
         this.storeManager = storeManager;
         this.cnx = cnx;
         this.tableName = tableName;
@@ -79,6 +95,8 @@ public class HBaseKeyColumnValueStore implements KeyColumnValueStore {
         this.storeName = storeName;
         this.columnFamilyBytes = columnFamily.getBytes();
         this.entryGetter = new HBaseGetter(storeManager.getMetaDataSchema(storeName));
+        this.localLockMediator = llm;
+        this.lockExpiryTime = storeManager.getStorageConfig().get(GraphDatabaseConfiguration.LOCK_EXPIRE);
     }

     @Override
@@ -107,7 +125,15 @@ public class HBaseKeyColumnValueStore implements KeyColumnValueStore {
                             StaticBuffer column,
                             StaticBuffer expectedValue,
                             StoreTransaction txh) throws BackendException {
-        throw new UnsupportedOperationException();
+        KeyColumn lockID = new KeyColumn(key, column);
+        logger.debug("Attempting to acquireLock on {} ", lockID);
+        final Timepoint lockStartTime = Timestamps.NANO.getTime(System.nanoTime(), TimeUnit.NANOSECONDS);
+        boolean locked = localLockMediator.lock(lockID, txh, lockStartTime.add(lockExpiryTime));
+        if (!locked) {
+            throw new PermanentLockingException("Could not lock the keyColumn " + lockID
+                    + " on CF {} " + Bytes.toString(columnFamilyBytes));
+        }
+        ((HBaseTransaction) txh).updateLocks(lockID, expectedValue);
     }

     @Override
@@ -167,7 +193,9 @@ public class HBaseKeyColumnValueStore implements KeyColumnValueStore {
         try {
             table = cnx.getTable(tableName);
+            logger.debug("Get requests {} {} ", Bytes.toString(columnFamilyBytes), requests.size());
             results = table.get(requests);
+            logger.debug("Get requests finished {} {} ", Bytes.toString(columnFamilyBytes), requests.size());
         } finally {
             IOUtils.closeQuietly(table);
         }
@@ -231,6 +259,7 @@ public class HBaseKeyColumnValueStore implements KeyColumnValueStore {
         TableMask table = null;
+        logger.debug("Scan for row keys {} {} ", Bytes.toString(startKey), Bytes.toString(endKey));

         try {
             table = cnx.getTable(tableName);
             return new RowIterator(table, table.getScanner(scan.setFilter(filters)), columnFamilyBytes);
repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseStoreManager.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseStoreManager.java
@@ -14,14 +14,6 @@
  */
 package com.thinkaurelius.titan.diskstorage.hbase;

-import static com.thinkaurelius.titan.diskstorage.Backend.EDGESTORE_NAME;
-import static com.thinkaurelius.titan.diskstorage.Backend.ID_STORE_NAME;
-import static com.thinkaurelius.titan.diskstorage.Backend.INDEXSTORE_NAME;
-import static com.thinkaurelius.titan.diskstorage.Backend.LOCK_STORE_SUFFIX;
-import static com.thinkaurelius.titan.diskstorage.Backend.SYSTEM_MGMT_LOG_NAME;
-import static com.thinkaurelius.titan.diskstorage.Backend.SYSTEM_TX_LOG_NAME;
-import static com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration.SYSTEM_PROPERTIES_STORE_NAME;
-
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -34,8 +26,11 @@ import java.util.NavigableMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;

+import com.thinkaurelius.titan.diskstorage.Backend;
 import com.thinkaurelius.titan.diskstorage.configuration.ConfigElement;
 import com.thinkaurelius.titan.diskstorage.keycolumnvalue.CustomizeStoreKCVSManager;
+import com.thinkaurelius.titan.diskstorage.locking.LocalLockMediator;
+import com.thinkaurelius.titan.diskstorage.locking.LocalLockMediators;
 import com.thinkaurelius.titan.diskstorage.util.time.Timestamps;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -51,6 +46,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.slf4j.Logger;
@@ -236,15 +232,15 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
     private static final BiMap<String, String> SHORT_CF_NAME_MAP =
             ImmutableBiMap.<String, String>builder()
-                    .put(INDEXSTORE_NAME, "g")
-                    .put(INDEXSTORE_NAME + LOCK_STORE_SUFFIX, "h")
-                    .put(ID_STORE_NAME, "i")
-                    .put(EDGESTORE_NAME, "e")
-                    .put(EDGESTORE_NAME + LOCK_STORE_SUFFIX, "f")
-                    .put(SYSTEM_PROPERTIES_STORE_NAME, "s")
-                    .put(SYSTEM_PROPERTIES_STORE_NAME + LOCK_STORE_SUFFIX, "t")
-                    .put(SYSTEM_MGMT_LOG_NAME, "m")
-                    .put(SYSTEM_TX_LOG_NAME, "l")
+                    .put(Backend.INDEXSTORE_NAME, "g")
+                    .put(Backend.INDEXSTORE_NAME + Backend.LOCK_STORE_SUFFIX, "h")
+                    .put(Backend.ID_STORE_NAME, "i")
+                    .put(Backend.EDGESTORE_NAME, "e")
+                    .put(Backend.EDGESTORE_NAME + Backend.LOCK_STORE_SUFFIX, "f")
+                    .put(GraphDatabaseConfiguration.SYSTEM_PROPERTIES_STORE_NAME, "s")
+                    .put(GraphDatabaseConfiguration.SYSTEM_PROPERTIES_STORE_NAME + Backend.LOCK_STORE_SUFFIX, "t")
+                    .put(Backend.SYSTEM_MGMT_LOG_NAME, "m")
+                    .put(Backend.SYSTEM_TX_LOG_NAME, "l")
                     .build();

     private static final StaticBuffer FOUR_ZERO_BYTES = BufferUtil.zeroBuffer(4);
@@ -275,6 +271,8 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
     // Mutable instance state
     private final ConcurrentMap<String, HBaseKeyColumnValueStore> openStores;

+    private LocalLockMediator<StoreTransaction> llm;
+
     public HBaseStoreManager(com.thinkaurelius.titan.diskstorage.configuration.Configuration config) throws BackendException {
         super(config, PORT_DEFAULT);
@@ -390,6 +388,7 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
                 .orderedScan(true).unorderedScan(true).batchMutation(true)
                 .multiQuery(true).distributed(true).keyOrdered(true).storeTTL(true)
                 .timestamps(true).preferredTimestamps(PREFERRED_TIMESTAMPS)
+                .locking(true)
                 .keyConsistent(c);

         try {
@@ -403,6 +402,7 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
     @Override
     public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws BackendException {
+        logger.debug("Enter mutateMany");
         final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
         // In case of an addition and deletion with identical timestamps, the
         // deletion tombstone wins.
@@ -429,7 +429,9 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
         try {
             table = cnx.getTable(tableName);
+            logger.debug("mutateMany : batch mutate started size {} ", batch.size());
             table.batch(batch, new Object[batch.size()]);
+            logger.debug("mutateMany : batch mutate finished {} ", batch.size());
         } finally {
             IOUtils.closeQuietly(table);
         }
@@ -456,7 +458,9 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
         if (store == null) {
             final String cfName = shortCfNames ? shortenCfName(longName) : longName;

-            HBaseKeyColumnValueStore newStore = new HBaseKeyColumnValueStore(this, cnx, tableName, cfName, longName);
+            final String llmPrefix = getName();
+            llm = LocalLockMediators.INSTANCE.<StoreTransaction>get(llmPrefix, times);
+            HBaseKeyColumnValueStore newStore = new HBaseKeyColumnValueStore(this, cnx, tableName, cfName, longName, llm);

             store = openStores.putIfAbsent(longName, newStore); // nothing bad happens if we loose to other thread
@@ -475,7 +479,7 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
     @Override
     public StoreTransaction beginTransaction(final BaseTransactionConfig config) throws BackendException {
-        return new HBaseTransaction(config);
+        return new HBaseTransaction(config, llm);
     }

     @Override
@@ -804,12 +808,7 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
             adm.addColumn(tableName, cdesc);

-            try {
                 logger.debug("Added HBase ColumnFamily {}, waiting for 1 sec. to propogate.", columnFamily);
-                Thread.sleep(1000L);
-            } catch (InterruptedException ie) {
-                throw new TemporaryBackendException(ie);
-            }

             adm.enableTable(tableName);
         } catch (TableNotFoundException ee) {
@@ -832,6 +831,8 @@ public class HBaseStoreManager extends DistributedStoreManager implements KeyCol
             if (ttlInSeconds > 0)
                 cdesc.setTimeToLive(ttlInSeconds);

+            cdesc.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
+
         }

     /**
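In openDatabase above, the store manager now looks up a shared mediator by name via LocalLockMediators.INSTANCE.<StoreTransaction>get(llmPrefix, times) and hands it to each store and to every new HBaseTransaction. A hedged, self-contained sketch of that per-namespace registry pattern — MediatorRegistry and its Supplier-based get are invented here for illustration, not the Titan API:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Supplier;

    /** Illustrative only: hand out one shared mediator-like object per namespace within a JVM. */
    public enum MediatorRegistry {
        INSTANCE;

        private final ConcurrentMap<String, Object> mediators = new ConcurrentHashMap<>();

        @SuppressWarnings("unchecked")
        public <T> T get(String namespace, Supplier<T> factory) {
            // computeIfAbsent guarantees a single instance per namespace, even with concurrent callers
            return (T) mediators.computeIfAbsent(namespace, n -> factory.get());
        }

        public static void main(String[] args) {
            Object a = MediatorRegistry.INSTANCE.get("hbase:atlas", Object::new);
            Object b = MediatorRegistry.INSTANCE.get("hbase:atlas", Object::new);
            System.out.println(a == b); // true: every store and transaction sees the same mediator
        }
    }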
repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseTransaction.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HBaseTransaction.java
@@ -14,8 +14,18 @@
  */
 package com.thinkaurelius.titan.diskstorage.hbase;

+import com.thinkaurelius.titan.diskstorage.BackendException;
 import com.thinkaurelius.titan.diskstorage.BaseTransactionConfig;
+import com.thinkaurelius.titan.diskstorage.StaticBuffer;
 import com.thinkaurelius.titan.diskstorage.common.AbstractStoreTransaction;
+import com.thinkaurelius.titan.diskstorage.keycolumnvalue.StoreTransaction;
+import com.thinkaurelius.titan.diskstorage.locking.LocalLockMediator;
+import com.thinkaurelius.titan.diskstorage.util.KeyColumn;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.LinkedHashSet;
+import java.util.Set;

 /**
  * This class overrides and adds nothing compared with
@@ -27,7 +37,39 @@ import com.thinkaurelius.titan.diskstorage.common.AbstractStoreTransaction;
  */
 public class HBaseTransaction extends AbstractStoreTransaction {

-    public HBaseTransaction(final BaseTransactionConfig config) {
+    private static final Logger log = LoggerFactory.getLogger(HBaseTransaction.class);
+
+    LocalLockMediator<StoreTransaction> llm;
+
+    Set<KeyColumn> keyColumnLocks = new LinkedHashSet<>();
+
+    public HBaseTransaction(final BaseTransactionConfig config, LocalLockMediator<StoreTransaction> llm) {
         super(config);
+        this.llm = llm;
+    }
+
+    @Override
+    public synchronized void rollback() throws BackendException {
+        super.rollback();
+        log.debug("Rolled back transaction");
+        deleteAllLocks();
+    }
+
+    @Override
+    public synchronized void commit() throws BackendException {
+        super.commit();
+        log.debug("Committed transaction");
+        deleteAllLocks();
+    }
+
+    public void updateLocks(KeyColumn lockID, StaticBuffer expectedValue) {
+        keyColumnLocks.add(lockID);
+    }
+
+    private void deleteAllLocks() {
+        for (KeyColumn kc : keyColumnLocks) {
+            log.debug("Removed lock {} ", kc);
+            llm.unlock(kc, this);
+        }
     }
 }
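With this change the transaction owns the lock lifecycle: HBaseKeyColumnValueStore.acquireLock registers each (key, column) pair via updateLocks, and commit() or rollback() releases everything through deleteAllLocks(). A minimal, hedged sketch of the same collect-then-release pattern — LockHandle, recordLock and releaseAll are stand-ins invented for this example, not Atlas/Titan APIs:

    import java.util.LinkedHashSet;
    import java.util.Set;

    /** Illustrative only: a transaction that remembers its locks and frees them all at the end. */
    public class CollectThenReleaseDemo {

        /** Stand-in for a LocalLockMediator-style unlock callback. */
        interface LockHandle {
            void release();
        }

        static class Tx {
            private final Set<LockHandle> heldLocks = new LinkedHashSet<>();

            void recordLock(LockHandle handle) {   // analogous to HBaseTransaction.updateLocks(...)
                heldLocks.add(handle);
            }

            void commit() {                        // analogous to commit()/rollback() calling deleteAllLocks()
                releaseAll();
            }

            private void releaseAll() {
                for (LockHandle handle : heldLocks) {
                    handle.release();
                }
                heldLocks.clear();
            }
        }

        public static void main(String[] args) {
            Tx tx = new Tx();
            tx.recordLock(() -> System.out.println("released row1:col1"));
            tx.recordLock(() -> System.out.println("released row2:col1"));
            tx.commit(); // prints both releases, in acquisition order (LinkedHashSet preserves it)
        }
    }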
repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HConnection0_98.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HConnection0_98.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HConnection1_0.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HConnection1_0.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HTable0_98.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HTable0_98.java  (file moved)

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HTable1_0.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/HTable1_0.java
@@ -18,7 +18,6 @@ import java.io.IOException;
 import java.util.List;

 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Row;

repository/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/TableMask.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/hbase/TableMask.java  (file moved)
titan/src/main/java/com/thinkaurelius/titan/diskstorage/locking/LocalLockMediator.java
0 → 100644
View file @
919120f6
/*
* Copyright 2012-2013 Aurelius LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package
com
.
thinkaurelius
.
titan
.
diskstorage
.
locking
;
import
com.google.common.base.Preconditions
;
import
com.thinkaurelius.titan.diskstorage.util.time.Timepoint
;
import
com.thinkaurelius.titan.diskstorage.util.time.TimestampProvider
;
import
com.thinkaurelius.titan.diskstorage.locking.consistentkey.ExpectedValueCheckingTransaction
;
import
com.thinkaurelius.titan.diskstorage.util.KeyColumn
;
import
org.slf4j.Logger
;
import
org.slf4j.LoggerFactory
;
import
java.util.concurrent.ConcurrentHashMap
;
import
java.util.concurrent.DelayQueue
;
import
java.util.concurrent.Delayed
;
import
java.util.concurrent.ExecutorService
;
import
java.util.concurrent.Executors
;
import
java.util.concurrent.ThreadFactory
;
import
java.util.concurrent.TimeUnit
;
/**
* This class resolves lock contention between two transactions on the same JVM.
* <p/>
* This is not just an optimization to reduce network traffic. Locks written by
* Titan to a distributed key-value store contain an identifier, the "Rid",
* which is unique only to the process level. The Rid can't tell which
* transaction in a process holds any given lock. This class prevents two
* transactions in a single process from concurrently writing the same lock to a
* distributed key-value store.
*
* @author Dan LaRocque <dalaro@hopcount.org>
*/
public
class
LocalLockMediator
<
T
>
{
private
static
final
Logger
log
=
LoggerFactory
.
getLogger
(
LocalLockMediator
.
class
);
/**
* Namespace for which this mediator is responsible
*
* @see LocalLockMediatorProvider
*/
private
final
String
name
;
private
final
TimestampProvider
times
;
private
DelayQueue
<
ExpirableKeyColumn
>
expiryQueue
=
new
DelayQueue
<>();
private
ExecutorService
lockCleanerService
=
Executors
.
newFixedThreadPool
(
1
,
new
ThreadFactory
()
{
@Override
public
Thread
newThread
(
Runnable
runnable
)
{
Thread
thread
=
Executors
.
defaultThreadFactory
().
newThread
(
runnable
);
thread
.
setDaemon
(
true
);
return
thread
;
}
});
/**
* Maps a ({@code key}, {@code column}) pair to the local transaction
* holding a lock on that pair. Values in this map may have already expired
* according to {@link AuditRecord#expires}, in which case the lock should
* be considered invalid.
*/
private
final
ConcurrentHashMap
<
KeyColumn
,
AuditRecord
<
T
>>
locks
=
new
ConcurrentHashMap
<
KeyColumn
,
AuditRecord
<
T
>>();
public
LocalLockMediator
(
String
name
,
TimestampProvider
times
)
{
this
.
name
=
name
;
this
.
times
=
times
;
Preconditions
.
checkNotNull
(
name
);
Preconditions
.
checkNotNull
(
times
);
lockCleanerService
.
submit
(
new
LockCleaner
());
}
/**
* Acquire the lock specified by {@code kc}.
* <p/>
* <p/>
* For any particular key-column, whatever value of {@code requestor} is
* passed to this method must also be passed to the associated later call to
* {@link #unlock(KeyColumn, ExpectedValueCheckingTransaction)}.
* <p/>
* If some requestor {@code r} calls this method on a KeyColumn {@code k}
* and this method returns true, then subsequent calls to this method by
* {@code r} on {@code l} merely attempt to update the {@code expiresAt}
* timestamp. This differs from typical lock reentrance: multiple successful
* calls to this method do not require an equal number of calls to
* {@code #unlock()}. One {@code #unlock()} call is enough, no matter how
* many times a {@code requestor} called {@code lock} beforehand. Note that
* updating the timestamp may fail, in which case the lock is considered to
* have expired and the calling context should assume it no longer holds the
* lock specified by {@code kc}.
* <p/>
* The number of nanoseconds elapsed since the UNIX Epoch is not readily
* available within the JVM. When reckoning expiration times, this method
* uses the approximation implemented by
* {@link com.thinkaurelius.titan.diskstorage.util.NanoTime#getApproxNSSinceEpoch(false)}.
* <p/>
* The current implementation of this method returns true when given an
* {@code expiresAt} argument in the past. Future implementations may return
* false instead.
*
* @param kc lock identifier
* @param requestor the object locking {@code kc}
* @param expires instant at which this lock will automatically expire
* @return true if the lock is acquired, false if it was not acquired
*/
public
boolean
lock
(
KeyColumn
kc
,
T
requestor
,
Timepoint
expires
)
{
assert
null
!=
kc
;
assert
null
!=
requestor
;
AuditRecord
<
T
>
audit
=
new
AuditRecord
<
T
>(
requestor
,
expires
);
AuditRecord
<
T
>
inmap
=
locks
.
putIfAbsent
(
kc
,
audit
);
boolean
success
=
false
;
if
(
null
==
inmap
)
{
// Uncontended lock succeeded
if
(
log
.
isTraceEnabled
())
{
log
.
trace
(
"New local lock created: {} namespace={} txn={}"
,
new
Object
[]{
kc
,
name
,
requestor
});
}
success
=
true
;
}
else
if
(
inmap
.
equals
(
audit
))
{
// requestor has already locked kc; update expiresAt
success
=
locks
.
replace
(
kc
,
inmap
,
audit
);
if
(
log
.
isTraceEnabled
())
{
if
(
success
)
{
log
.
trace
(
"Updated local lock expiration: {} namespace={} txn={} oldexp={} newexp={}"
,
new
Object
[]{
kc
,
name
,
requestor
,
inmap
.
expires
,
audit
.
expires
});
}
else
{
log
.
trace
(
"Failed to update local lock expiration: {} namespace={} txn={} oldexp={} newexp={}"
,
new
Object
[]{
kc
,
name
,
requestor
,
inmap
.
expires
,
audit
.
expires
});
}
}
}
else
if
(
0
>
inmap
.
expires
.
compareTo
(
times
.
getTime
()))
{
// the recorded lock has expired; replace it
success
=
locks
.
replace
(
kc
,
inmap
,
audit
);
if
(
log
.
isTraceEnabled
())
{
log
.
trace
(
"Discarding expired lock: {} namespace={} txn={} expired={}"
,
new
Object
[]{
kc
,
name
,
inmap
.
holder
,
inmap
.
expires
});
}
}
else
{
// we lost to a valid lock
if
(
log
.
isTraceEnabled
())
{
log
.
trace
(
"Local lock failed: {} namespace={} txn={} (already owned by {})"
,
new
Object
[]{
kc
,
name
,
requestor
,
inmap
});
}
}
if
(
success
)
{
expiryQueue
.
add
(
new
ExpirableKeyColumn
(
kc
,
expires
));
}
return
success
;
}
/**
* Release the lock specified by {@code kc} and which was previously
* locked by {@code requestor}, if it is possible to release it.
*
* @param kc lock identifier
* @param requestor the object which previously locked {@code kc}
*/
public
boolean
unlock
(
KeyColumn
kc
,
T
requestor
)
{
if
(!
locks
.
containsKey
(
kc
))
{
log
.
info
(
"Local unlock failed: no locks found for {}"
,
kc
);
return
false
;
}
AuditRecord
<
T
>
unlocker
=
new
AuditRecord
<
T
>(
requestor
,
null
);
AuditRecord
<
T
>
holder
=
locks
.
get
(
kc
);
if
(!
holder
.
equals
(
unlocker
))
{
log
.
error
(
"Local unlock of {} by {} failed: it is held by {}"
,
new
Object
[]{
kc
,
unlocker
,
holder
});
return
false
;
}
boolean
removed
=
locks
.
remove
(
kc
,
unlocker
);
if
(
removed
)
{
expiryQueue
.
remove
(
kc
);
if
(
log
.
isTraceEnabled
())
{
log
.
trace
(
"Local unlock succeeded: {} namespace={} txn={}"
,
new
Object
[]{
kc
,
name
,
requestor
});
}
}
else
{
log
.
warn
(
"Local unlock warning: lock record for {} disappeared "
+
"during removal; this suggests the lock either expired "
+
"while we were removing it, or that it was erroneously "
+
"unlocked multiple times."
,
kc
);
}
// Even if !removed, we're finished unlocking, so return true
return
true
;
}
public
String
toString
()
{
return
"LocalLockMediator ["
+
name
+
", ~"
+
locks
.
size
()
+
" current locks]"
;
}
/**
* A record containing the local transaction that holds a lock and the
* lock's expiration time.
*/
private
static
class
AuditRecord
<
T
>
{
/**
* The local transaction that holds/held the lock.
*/
private
final
T
holder
;
/**
* The expiration time of a the lock.
*/
private
final
Timepoint
expires
;
/**
* Cached hashCode.
*/
private
int
hashCode
;
private
AuditRecord
(
T
holder
,
Timepoint
expires
)
{
this
.
holder
=
holder
;
this
.
expires
=
expires
;
}
/**
* This implementation depends only on the lock holder and not on the
* lock expiration time.
*/
@Override
public
int
hashCode
()
{
if
(
0
==
hashCode
)
hashCode
=
holder
.
hashCode
();
return
hashCode
;
}
/**
* This implementation depends only on the lock holder and not on the
* lock expiration time.
*/
@Override
public
boolean
equals
(
Object
obj
)
{
if
(
this
==
obj
)
return
true
;
if
(
obj
==
null
)
return
false
;
if
(
getClass
()
!=
obj
.
getClass
())
return
false
;
/*
* This warning suppression is harmless because we are only going to
* call other.holder.equals(...), and since equals(...) is part of
* Object, it is guaranteed to be defined no matter the concrete
* type of parameter T.
*/
@SuppressWarnings
(
"rawtypes"
)
AuditRecord
other
=
(
AuditRecord
)
obj
;
if
(
holder
==
null
)
{
if
(
other
.
holder
!=
null
)
return
false
;
}
else
if
(!
holder
.
equals
(
other
.
holder
))
return
false
;
return
true
;
}
@Override
public
String
toString
()
{
return
"AuditRecord [txn="
+
holder
+
", expires="
+
expires
+
"]"
;
}
}
private
class
LockCleaner
implements
Runnable
{
@Override
public
void
run
()
{
try
{
while
(
true
)
{
log
.
debug
(
"Lock Cleaner service started"
);
ExpirableKeyColumn
lock
=
expiryQueue
.
take
();
log
.
debug
(
"Expiring key column "
+
lock
.
getKeyColumn
());
locks
.
remove
(
lock
.
getKeyColumn
());
}
}
catch
(
InterruptedException
e
)
{
log
.
debug
(
"Received interrupt. Exiting"
);
}
}
}
    private static class ExpirableKeyColumn implements Delayed {

        private Timepoint expiryTime;
        private KeyColumn kc;

        public ExpirableKeyColumn(KeyColumn keyColumn, Timepoint expiryTime) {
            this.kc = keyColumn;
            this.expiryTime = expiryTime;
        }

        @Override
        public long getDelay(TimeUnit unit) {
            return expiryTime.getTimestamp(TimeUnit.NANOSECONDS);
        }

        @Override
        public int compareTo(Delayed o) {
            if (this.expiryTime.getTimestamp(TimeUnit.NANOSECONDS) < ((ExpirableKeyColumn) o).expiryTime.getTimestamp(TimeUnit.NANOSECONDS)) {
                return -1;
            }
            if (this.expiryTime.getTimestamp(TimeUnit.NANOSECONDS) > ((ExpirableKeyColumn) o).expiryTime.getTimestamp(TimeUnit.NANOSECONDS)) {
                return 1;
            }
            return 0;
        }

        public KeyColumn getKeyColumn() {
            return kc;
        }
    }
}
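The new expiry machinery above hinges on java.util.concurrent.DelayQueue semantics: ExpirableKeyColumn implements Delayed, and the LockCleaner thread's take() call blocks until the head element's delay has elapsed, at which point the corresponding entry is dropped from the lock map. Below is a self-contained sketch of that mechanism; it is a standalone illustration with made-up names, not Atlas or Titan code, and it computes getDelay() as remaining time for clarity.

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Standalone illustration of the DelayQueue mechanics the cleaner thread relies on:
// take() blocks until an element's getDelay() reaches zero, then hands the expired
// element to the consumer (which, in LockCleaner, removes the stale lock entry).
public class DelayQueueSketch {

    static final class ExpiringKey implements Delayed {
        private final String key;
        private final long expiresAtNanos;

        ExpiringKey(String key, long ttlMillis) {
            this.key = key;
            this.expiresAtNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(ttlMillis);
        }

        @Override
        public long getDelay(TimeUnit unit) {
            // Remaining time until expiry, converted to the requested unit.
            return unit.convert(expiresAtNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
        }

        @Override
        public int compareTo(Delayed other) {
            return Long.compare(getDelay(TimeUnit.NANOSECONDS), other.getDelay(TimeUnit.NANOSECONDS));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        DelayQueue<ExpiringKey> queue = new DelayQueue<ExpiringKey>();
        queue.add(new ExpiringKey("row1:col1", 100)); // expires after roughly 100 ms
        ExpiringKey expired = queue.take();           // blocks until the delay elapses
        System.out.println("Expired lock for " + expired.key);
    }
}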
repository/src/main/java/com/thinkaurelius/titan/diskstorage/solr/Solr5Index.java → titan/src/main/java/com/thinkaurelius/titan/diskstorage/solr/Solr5Index.java
View file @ 919120f6
@@ -41,6 +41,7 @@ import com.thinkaurelius.titan.diskstorage.indexing.IndexQuery;
 import com.thinkaurelius.titan.diskstorage.indexing.KeyInformation;
 import com.thinkaurelius.titan.diskstorage.indexing.RawQuery;
 import com.thinkaurelius.titan.diskstorage.util.DefaultTransaction;
+import com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration;
 import com.thinkaurelius.titan.graphdb.configuration.PreInitializeConfigOptions;
 import com.thinkaurelius.titan.graphdb.database.serialize.AttributeUtil;
 import com.thinkaurelius.titan.graphdb.database.serialize.attribute.AbstractDecimal;
@@ -89,8 +90,8 @@ import java.util.Map;
 import java.util.TimeZone;
 import java.util.UUID;
-import static com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration.INDEX_MAX_RESULT_SET_SIZE;
-import static com.thinkaurelius.titan.graphdb.configuration.GraphDatabaseConfiguration.INDEX_NS;
+import static com.thinkaurelius.titan.core.attribute.Cmp.*;
+import static com.thinkaurelius.titan.core.schema.Mapping.*;
 /**
  * NOTE: Copied from titan for supporting sol5. Do not change
@@ -115,7 +116,7 @@ public class Solr5Index implements IndexProvider {
     }
     public static final ConfigNamespace SOLR_NS =
-            new ConfigNamespace(INDEX_NS, "solr", "Solr index configuration");
+            new ConfigNamespace(GraphDatabaseConfiguration.INDEX_NS, "solr", "Solr index configuration");
     public static final ConfigOption<String> SOLR_MODE = new ConfigOption<String>(SOLR_NS, "mode",
             "The operation mode for Solr which is either via HTTP (`http`) or using SolrCloud (`cloud`)",
@@ -182,7 +183,7 @@ public class Solr5Index implements IndexProvider {
     private static final IndexFeatures SOLR_FEATURES = new IndexFeatures.Builder().supportsDocumentTTL()
-            .setDefaultStringMapping(Mapping.TEXT).supportedStringMappings(Mapping.TEXT, Mapping.STRING).build();
+            .setDefaultStringMapping(TEXT).supportedStringMappings(TEXT, STRING).build();
     private final SolrClient solrClient;
     private final Configuration configuration;
@@ -200,7 +201,7 @@ public class Solr5Index implements IndexProvider {
         mode = Mode.parse(config.get(SOLR_MODE));
         dynFields = config.get(DYNAMIC_FIELDS);
         keyFieldIds = parseKeyFieldsForCollections(config);
-        maxResults = config.get(INDEX_MAX_RESULT_SET_SIZE);
+        maxResults = config.get(GraphDatabaseConfiguration.INDEX_MAX_RESULT_SET_SIZE);
         ttlField = config.get(TTL_FIELD);
         waitSearcher = config.get(WAIT_SEARCHER);
@@ -556,10 +557,10 @@ public class Solr5Index implements IndexProvider {
             }
         } else if (value instanceof String) {
             Mapping map = getStringMapping(informations.get(key));
-            assert map == Mapping.TEXT || map == Mapping.STRING;
-            if (map == Mapping.TEXT && !titanPredicate.toString().startsWith("CONTAINS"))
+            assert map == TEXT || map == STRING;
+            if (map == TEXT && !titanPredicate.toString().startsWith("CONTAINS"))
                 throw new IllegalArgumentException("Text mapped string values only support CONTAINS queries and not: " + titanPredicate);
-            if (map == Mapping.STRING && titanPredicate.toString().startsWith("CONTAINS"))
+            if (map == STRING && titanPredicate.toString().startsWith("CONTAINS"))
                 throw new IllegalArgumentException("String mapped string values do not support CONTAINS queries: " + titanPredicate);
             //Special case
@@ -587,9 +588,9 @@ public class Solr5Index implements IndexProvider {
                 return (key + ":" + escapeValue(value) + "*");
             } else if (titanPredicate == Text.REGEX || titanPredicate == Text.CONTAINS_REGEX) {
                 return (key + ":/" + value + "/");
-            } else if (titanPredicate == Cmp.EQUAL) {
+            } else if (titanPredicate == EQUAL) {
                 return (key + ":\"" + escapeValue(value) + "\"");
-            } else if (titanPredicate == Cmp.NOT_EQUAL) {
+            } else if (titanPredicate == NOT_EQUAL) {
                 return ("-" + key + ":\"" + escapeValue(value) + "\"");
             } else {
                 throw new IllegalArgumentException("Relation is not supported for string value: " + titanPredicate);
@@ -651,9 +652,9 @@ public class Solr5Index implements IndexProvider {
                 throw new IllegalArgumentException("Boolean types only support EQUAL or NOT_EQUAL");
             }
         } else if (value instanceof UUID) {
-            if (titanPredicate == Cmp.EQUAL) {
+            if (titanPredicate == EQUAL) {
                 return (key + ":\"" + escapeValue(value) + "\"");
-            } else if (titanPredicate == Cmp.NOT_EQUAL) {
+            } else if (titanPredicate == NOT_EQUAL) {
                 return ("-" + key + ":\"" + escapeValue(value) + "\"");
             } else {
                 throw new IllegalArgumentException("Relation is not supported for uuid value: " + titanPredicate);
@@ -779,8 +780,8 @@ public class Solr5Index implements IndexProvider {
     @Override
     public boolean supports(KeyInformation information, TitanPredicate titanPredicate) {
         Class<?> dataType = information.getDataType();
-        Mapping mapping = Mapping.getMapping(information);
-        if (mapping != Mapping.DEFAULT && !AttributeUtil.isString(dataType)) return false;
+        Mapping mapping = getMapping(information);
+        if (mapping != DEFAULT && !AttributeUtil.isString(dataType)) return false;
         if (Number.class.isAssignableFrom(dataType)) {
             return titanPredicate instanceof Cmp;
@@ -792,16 +793,16 @@ public class Solr5Index implements IndexProvider {
                 case TEXT:
                     return titanPredicate == Text.CONTAINS || titanPredicate == Text.CONTAINS_PREFIX || titanPredicate == Text.CONTAINS_REGEX;
                 case STRING:
-                    return titanPredicate == Cmp.EQUAL || titanPredicate == Cmp.NOT_EQUAL || titanPredicate == Text.REGEX || titanPredicate == Text.PREFIX;
+                    return titanPredicate == EQUAL || titanPredicate == NOT_EQUAL || titanPredicate == Text.REGEX || titanPredicate == Text.PREFIX;
 //                case TEXTSTRING:
 //                    return (titanPredicate instanceof Text) || titanPredicate == Cmp.EQUAL || titanPredicate==Cmp.NOT_EQUAL;
             }
         } else if (dataType == Date.class) {
             if (titanPredicate instanceof Cmp) return true;
         } else if (dataType == Boolean.class) {
-            return titanPredicate == Cmp.EQUAL || titanPredicate == Cmp.NOT_EQUAL;
+            return titanPredicate == EQUAL || titanPredicate == NOT_EQUAL;
         } else if (dataType == UUID.class) {
-            return titanPredicate == Cmp.EQUAL || titanPredicate == Cmp.NOT_EQUAL;
+            return titanPredicate == EQUAL || titanPredicate == NOT_EQUAL;
         }
         return false;
     }
@@ -809,11 +810,11 @@ public class Solr5Index implements IndexProvider {
     @Override
     public boolean supports(KeyInformation information) {
         Class<?> dataType = information.getDataType();
-        Mapping mapping = Mapping.getMapping(information);
+        Mapping mapping = getMapping(information);
         if (Number.class.isAssignableFrom(dataType) || dataType == Geoshape.class || dataType == Date.class || dataType == Boolean.class || dataType == UUID.class) {
-            if (mapping == Mapping.DEFAULT) return true;
+            if (mapping == DEFAULT) return true;
         } else if (AttributeUtil.isString(dataType)) {
-            if (mapping == Mapping.DEFAULT || mapping == Mapping.TEXT || mapping == Mapping.STRING) return true;
+            if (mapping == DEFAULT || mapping == TEXT || mapping == STRING) return true;
         }
         return false;
     }
@@ -861,8 +862,8 @@ public class Solr5Index implements IndexProvider {
     private static Mapping getStringMapping(KeyInformation information) {
         assert AttributeUtil.isString(information.getDataType());
-        Mapping map = Mapping.getMapping(information);
-        if (map == Mapping.DEFAULT) map = Mapping.TEXT;
+        Mapping map = getMapping(information);
+        if (map == DEFAULT) map = TEXT;
         return map;
     }
...
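The Solr5Index hunks above are reference-style cleanups rather than behavioural changes: the copied class now imports GraphDatabaseConfiguration directly and pulls the Cmp and Mapping constants in through wildcard static imports, so expressions such as Cmp.EQUAL and Mapping.TEXT shorten to EQUAL and TEXT while INDEX_NS becomes GraphDatabaseConfiguration.INDEX_NS. A minimal sketch of that equivalence follows; the class name is illustrative and the snippet is not part of the commit.

import com.thinkaurelius.titan.core.attribute.Cmp;

import static com.thinkaurelius.titan.core.attribute.Cmp.EQUAL;
import static com.thinkaurelius.titan.core.attribute.Cmp.NOT_EQUAL;

// Both methods resolve to the same Cmp constants; the static imports only drop
// the "Cmp." qualifier, which is all the diff above does for Cmp and Mapping.
class StaticImportSketch {
    boolean qualified(Cmp predicate) {
        return predicate == Cmp.EQUAL || predicate == Cmp.NOT_EQUAL;
    }

    boolean unqualified(Cmp predicate) {
        return predicate == EQUAL || predicate == NOT_EQUAL;
    }
}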
titan/src/test/java/com/thinkaurelius/titan/diskstorage/locking/LocalLockMediatorTest.java 0 → 100644
View file @ 919120f6
/*
 * Copyright 2012-2013 Aurelius LLC
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.thinkaurelius.titan.diskstorage.locking;

import com.thinkaurelius.titan.diskstorage.hbase.HBaseTransaction;
import com.thinkaurelius.titan.diskstorage.util.time.TimestampProvider;
import com.thinkaurelius.titan.diskstorage.util.time.Timestamps;
import com.thinkaurelius.titan.diskstorage.StaticBuffer;
import com.thinkaurelius.titan.diskstorage.util.KeyColumn;
import com.thinkaurelius.titan.diskstorage.util.StaticArrayBuffer;
import org.mockito.Mockito;
import org.testng.Assert;
import org.testng.annotations.Test;

import java.util.concurrent.TimeUnit;

public class LocalLockMediatorTest {

    private static final String LOCK_NAMESPACE = "test";
    private static final StaticBuffer LOCK_ROW = StaticArrayBuffer.of(new byte[]{1});
    private static final StaticBuffer LOCK_COL = StaticArrayBuffer.of(new byte[]{1});
    private static final KeyColumn kc = new KeyColumn(LOCK_ROW, LOCK_COL);
    private static final HBaseTransaction mockTx1 = Mockito.mock(HBaseTransaction.class);
    private static final HBaseTransaction mockTx2 = Mockito.mock(HBaseTransaction.class);

    @Test
    public void testLock() throws InterruptedException {
        TimestampProvider times = Timestamps.MICRO;
        LocalLockMediator<HBaseTransaction> llm =
                new LocalLockMediator<HBaseTransaction>(LOCK_NAMESPACE, times);

        //Expire immediately
        Assert.assertTrue(llm.lock(kc, mockTx1, times.getTime(0, TimeUnit.NANOSECONDS)));
        Assert.assertTrue(llm.lock(kc, mockTx2, times.getTime(Long.MAX_VALUE, TimeUnit.NANOSECONDS)));

        llm = new LocalLockMediator<HBaseTransaction>(LOCK_NAMESPACE, times);

        //Expire later
        Assert.assertTrue(llm.lock(kc, mockTx1, times.getTime(Long.MAX_VALUE, TimeUnit.NANOSECONDS)));
        //So second lock should fail on same keyCol
        Assert.assertFalse(llm.lock(kc, mockTx2, times.getTime(Long.MAX_VALUE, TimeUnit.NANOSECONDS)));

        //Unlock
        Assert.assertTrue(llm.unlock(kc, mockTx1));
        //Now locking should succeed
        Assert.assertTrue(llm.lock(kc, mockTx2, times.getTime(Long.MAX_VALUE, TimeUnit.NANOSECONDS)));
    }
}