public class OraclePropertyGraph extends OraclePropertyGraphBase
OraclePropertyGraphBase.OptimizationFlag
TransactionalGraph.Conclusion
TYPE_DT_BOOL, TYPE_DT_BYTE, TYPE_DT_CHAR, TYPE_DT_DATE, TYPE_DT_DOUBLE, TYPE_DT_EMPTY, TYPE_DT_FLOAT, TYPE_DT_INTEGER, TYPE_DT_LONG, TYPE_DT_SERI, TYPE_DT_SHORT, TYPE_DT_STRING
ERR_ARG_CANNOT_BE_NULL, ERR_CODE_NOT_IN_LIST, ERR_DOP_CONSTRAINT, ERR_E_DS_MUSTBE_FILE_OR_IS, ERR_E_MAX_LINE_CONSTRAINT, ERR_EDGE_DIR_NOT_BOTH, ERR_GRAPH_NAME_CANT_BENULL, ERR_HIT_EXCEPTION, ERR_HIT_INTERRUPT, ERR_HIT_IO, ERR_HIT_OPGE, ERR_HIT_PARSEE, ERR_ID_CANNOT_BE_NULL, ERR_IDX_KEY_CANNOT_BE_NULL, ERR_IN_DS_EDGE_CANTBE_NULL, ERR_IN_DS_VERT_CANTBE_NULL, ERR_IN_E_FLAT_DOESNT_EXIST, ERR_IN_V_FLAT_DOESNT_EXIST, ERR_INVALID_NUM_FIELDS, ERR_INVALID_NUM_NOARG, ERR_INVALID_NUMBER, ERR_INVALID_VAL, ERR_KEY_CANNOT_BE_ID, ERR_KEY_CANNOT_BE_NULL, ERR_KEY_NOT_SET_TO, ERR_NO_ELEM_TO_BE_CONSUMED, ERR_NO_OP_SUPPORTED, ERR_NOT_IMPLEMENTED_YET, ERR_NOT_ORACLE_V_E, ERR_NUM_PART_CONSTRAINT, ERR_OFFSET_CONSTRAINT, ERR_OFFSET_E_MUST_BE_POS, ERR_OFFSET_V_MUST_BE_POS, ERR_PG_ALREADY_EXISTS, ERR_PG_NOT_EMPTY, ERR_SIZE_MUST_BE_POS, ERR_TIMEOUT_TAB_CREATION, ERR_TYPE_ID_NOT_RECO, ERR_UNSUPPORTED_VAL_TYP, ERR_USER_REQ_OP_CANCEL, ERR_V_DS_MUSTBE_FILE_OR_IS, ERR_V_MAX_LINE_CONSTRAINT, ERR_VALUE_CANNOT_BE_NULL, INFO_EMP_K_NO_P_VAL, INFO_NUL_K_NO_P_VAL
Modifier and Type | Method and Description |
---|---|
void |
addAttributeToAllEdges(EdgeOpCallback eoc,
boolean skipStoreToCache,
int dop,
ProgressListener pl)
Adds an attribute to all edges based on the specified
EdgeOpCallback . |
void |
addAttributeToAllVertices(VertexOpCallback voc,
boolean skipStoreToCache,
int dop,
ProgressListener pl)
Adds an attribute to all vertices based on the specified
VertexOpCallback . |
Edge |
addEdge(Object id,
Vertex outVertex,
Vertex inVertex,
String label)
This method adds an edge to the graph instance.
|
Vertex |
addVertex(Object id)
This method adds a vertex to the graph instance.
|
void |
clearCache()
This method clears the Vertex and Edge caches associated to this
property graph instance.
|
void |
clearRepository()
This method removes all vertices and edges from this property graph instance.
|
void |
commit()
Commits changes made to this property graph instance.
|
static byte[] |
concatenate(byte[] ba1,
byte[] ba2)
This method returns a byte array that concatenates the two given byte arrays.
If one of the arguments is null, the return value will be the other argument.
|
static byte[] |
concatenate(byte[] ba1,
byte[] ba2,
byte[] ba3)
This method returns a byte array that concatenates the three given byte arrays.
If one of the arguments is null, the result is the concatenation of the remaining arguments.
|
long |
countEdges(int dop,
ProgressListener pl)
Counts all Edges using parallel scan and the specified DOP.
|
long |
countVertices(int dop,
ProgressListener pl)
Counts all vertices using parallel scans and the specified DOP.
|
void |
dropIndexTable()
This method removes all indices meta data from this property graph instance.
|
void |
flushHbaseCommits()
Flush HBase tables so property graph changes are committed into Apache HBase.
|
String |
getCompression()
Get the type of compression used in the property graph tables
|
org.apache.hadoop.hbase.io.compress.Compression.Algorithm |
getCompressionAlgorithm(String compress)
A utility to get a Compression Algorithm based on the specified string
|
org.apache.hadoop.conf.Configuration |
getConfiguration()
This method returns the underlying Configuration instance used for HBase
|
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding |
getDataBlockEncoding()
Gets the data block encoding algorithm used in block cache and optionally
on disk for all property graph tables
|
OracleEdge |
getEdge(Object id)
Return the edge referenced by the object identifier.
|
int |
getEdgeBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the
edge table
|
int |
getEdgeInitialNumRegions()
Get the initial number of regions defined for the Apache HBase Edge tables associated
to this property graph.
|
OracleEdgeBase |
getEdgeInstance(Long eid,
boolean bCreateIfAbsentFromCache,
boolean bSkipStoreToCache)
Returns an instance of OracleEdgeBase.
|
OracleEdgeBase |
getEdgeInstance(OracleVertexBase outVertex,
OracleVertexBase inVertex,
String edgeLabel,
Long eid,
boolean bCreateIfAbsentFromCache,
boolean bSkipStoreToCache)
Returns an instance of OracleEdgeBase.
|
int |
getEdgePropertyNames(int dop,
int timeout,
Set<String> propertyNames,
ProgressListener pl)
Gets the property names of all edges using parallel scans and the specified DOP.
|
Iterable<Edge> |
getEdges(String[] keys,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag)
Return an
Iterable to all the edges in the graph that have any of the specified keys and
satisfy the specified edge filter callback. |
Iterable<Edge> |
getEdges(String key,
Object value,
Class dtClass,
boolean acceptWildcard,
boolean preferredLuceneQuery)
Return an iterable to all matching edges that have a particular key/value property.
|
Iterable<Edge>[] |
getEdgesFromKey(org.apache.hadoop.hbase.client.HConnection[] conns,
boolean skipStoreToCache,
int startSplitID,
String[] keys)
Gets an array of
Iterable objects that hold all the edges in the graph
that have any of the specified keys and satisfy the specified edge filter callback
by executing a lookup over the secondary index table. |
int |
getEdgeSndIndexBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the secondary index
edge table
|
String |
getEdgeSndIndexTabName()
Get the name of the Apache HBase table used to store secondary indexes on edges in this property
graph.
|
long |
getEdgeSndIndexWriteBufferSize()
Get the write buffer size associated to the edge secondary index table
|
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID)
Gets an array of
Iterable objects that hold all the edges in the graph. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
boolean bClonedGraph)
Gets an array of
Iterable objects that hold all the edges in the graph. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
EdgeFilterCallback efc)
Gets an array of
Iterable objects that hold all the edges in the graph that
satisfy the specified edge filter callback. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterable objects that hold all the edges in the graph that
satisfy the specified edge filter callback. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterable objects that hold all the edges in the graph. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String key)
Gets an array of
Iterable objects that hold all the edges in the
graph that have a particular key. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String[] keys)
Gets an array of
Iterable objects that hold all the edges in the graph
that have any of the specified keys. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String[] keys,
boolean bClonedGraph,
EdgeFilterCallback efc)
Gets an array of
Iterable objects that hold all the edges in the graph
that have any of the specified keys and satisfy the specified edge filter callback. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String[] keys,
boolean bClonedGraph,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterable objects that hold all the edges in the graph
that have any of the specified keys and satisfy the specified edge filter callback. |
Iterable<Edge>[] |
getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String key,
boolean bClonedGraph)
Gets an array of
Iterable objects that hold all the edges in the
graph that have a particular key. |
int |
getEdgeTableSplits()
Get the number of Edge table splits based on the current number of splits
per region associated to this graph and the number of regions in the edge
table.
|
String |
getEdgeTabName()
Get the name of the Apache HBase table used to store edges in this property
graph.
|
long |
getEdgeWriteBufferSize()
Get the write buffer size associated to the edge table
|
Features |
getFeatures()
Get the particular features of the graph implementation.
|
String |
getGraphName()
This method returns the name of this property graph instance
|
static org.apache.hadoop.conf.Configuration |
getHbaseConfiguration(oracle.pgx.config.PgHbaseGraphConfig config)
Gets an HBase configuration from a PgHbaseGraphConfig object
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
int initialNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
int initialNumRegions,
int splitsPerRegion)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
int initialNumRegions,
int splitsPerRegion,
int blockCacheSize)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
String compress,
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe,
int splitsPerRegion,
int blockCacheSize,
int initialVertexNumRegions,
int initialEdgeNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
String compress,
int initialNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
String compress,
int initialNumRegions,
int splitsPerRegion)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
String compress,
int initialNumRegions,
int splitsPerRegion,
int blockCacheSize)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
String compress,
int splitsPerRegion,
int blockCacheSize,
int initialVertexNumRegions,
int initialEdgeNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided.
|
static OraclePropertyGraph |
getInstance(oracle.pgx.config.PgHbaseGraphConfig config)
Returns an instance of OraclePropertyGraph using a PGX PgHbaseGraphConfig
object.
|
static OraclePropertyGraph |
getInstanceWithSplits(org.apache.hadoop.conf.Configuration conf,
org.apache.hadoop.hbase.client.HConnection hconn,
String szGraphName,
int splitsPerRegion)
Returns an instance of OraclePropertyGraph using the Apache HBase connection
and graph name provided and the specified number of splits per region.
|
Set<Long> |
getInvalidEdgeIDSet(int dop,
ProgressListener pl)
Gets a set of invalid edges that are stored in the property graph.
|
int |
getLuceneBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the Lucene index table
|
String |
getLuceneTabName()
Get the name of the Apache HBase table used to store index metadata in this property
graph.
|
long |
getLuceneWriteBufferSize()
Get the write buffer size associated to the Lucene index table
|
long |
getMaxEdgeID(int dop,
ProgressListener pl)
Get the maximum edge ID using parallel scan and the specified DOP.
|
long |
getMaxVertexID(int dop,
ProgressListener pl)
Get maximum vertex ID using parallel scans and the specified DOP.
|
long |
getMinEdgeID(int dop,
ProgressListener pl)
Get the minimum edge ID using parallel scan with dop threads
|
long |
getMinVertexID(int dop,
ProgressListener pl)
Get minimum vertex ID using parallel scans and the specified DOP.
|
int |
getNumSplitsPerRegion()
Get the number of splits per region to use when scanning vertices/edges
|
long |
getOperationQueueSize()
Get the size of the operation queue
|
HBaseIndexManager |
getOracleIndexManager()
Gets the OracleIndexManager object associated to this property graph instance.
|
int |
getSaltSize()
Gets the salt size
|
int |
getScanCachingSize()
Get the caching size used in Scan operations
|
int |
getSecondaryIndexNumBuckets()
Return the number of buckets used to store secondary index data into the
vertex/edge secondary index tables
|
OracleVertex |
getVertex(Object id)
Return the vertex referenced by the object identifier.
|
int |
getVertexBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the
vertex table
|
Set<Long> |
getVertexIDSet(int dop,
ProgressListener pl)
Gets a set of Vertex IDs that are stored in the property graph
|
int |
getVertexInitialNumRegions()
Get the initial number of regions defined for the Apache HBase Vertex tables associated
to this property graph.
|
OracleVertexBase |
getVertexInstance(Long vid,
boolean bCreateIfAbsentFromCache,
boolean bSkipStoreToCache)
Returns an instance of OracleVertexBase.
|
int |
getVertexPropertyNames(int dop,
int timeout,
Set<String> propertyNames,
ProgressListener pl)
Gets the property names of all vertices using parallel scans and the specified DOP.
|
int |
getVertexSndIndexBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the secondary index
vertex table
|
String |
getVertexSndIndexTabName()
Get the name of the Apache HBase table used to store secondary indexes on vertices in this property
graph.
|
long |
getVertexSndIndexWriteBufferSize()
Get the write buffer size associated to the vertex secondary index table
|
int |
getVertexTableSplits()
Get the number of Vertex table splits based on the current number of splits
per region associated to this graph and the number of regions in the vertex
table.
|
String |
getVertexTabName()
Get the name of the Apache HBase table used to store vertices in this property
graph.
|
long |
getVertexWriteBufferSize()
Get the write buffer size associated to the vertex table
|
Iterable<Vertex> |
getVertices()
Return an iterable to all the vertices in the graph.
|
Iterable<Vertex> |
getVertices(String[] keys,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag)
Return an
Iterable to all the vertices in the graph that have any of the specified keys and
satisfy the specified vertex filter callback. |
Iterable<Vertex> |
getVertices(String key,
Object value,
Class dtClass,
boolean acceptWildcard,
boolean preferredLuceneQuery)
Return an iterable to all matching vertices that have a particular key/value property.
|
Iterable<Vertex>[] |
getVerticesFromKey(org.apache.hadoop.hbase.client.HConnection[] conns,
boolean skipStoreToCache,
int startSplitID,
String[] keys)
Gets an array of
Iterable objects that hold all the vertices in the graph
that have any of the specified keys and satisfy the specified vertex filter callback
by executing a lookup over the secondary index table. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID)
Gets an array of
Iterable objects that hold all the vertices in the graph. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
boolean bClonedGraph)
Gets an array of
Iterable objects that hold all the vertices in the graph. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterable objects that hold all the vertices in the graph. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String key)
Gets an array of
Iterable objects that hold all the vertices in the
graph that have a particular key. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String[] keys)
Gets an array of
Iterable objects that hold all the vertices in the graph
that have any of the specified keys. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String[] keys,
boolean bCloneGraph,
VertexFilterCallback vfc)
Gets an array of
Iterable objects that hold all the vertices in the graph
that have any of the specified keys and satisfy the specified vertex filter callback. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String[] keys,
boolean bCloneGraph,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterable objects that hold all the vertices in the graph
that have any of the specified keys and satisfy the specified vertex filter callback. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
String key,
boolean bCloneGraph)
Gets an array of
Iterable objects that hold all the vertices in the
graph that have a particular key. |
Iterable<Vertex>[] |
getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterable objects that hold all the vertices in the graph that
satisfy the specified vertex filter callback. |
long |
getWriteBufferSize()
Get the write buffer size associated to the vertex table
|
static org.apache.hadoop.conf.Configuration |
prepareSecureConfig(org.apache.hadoop.conf.Configuration config,
String hbaseSecAuth,
String hadoopSecAuth,
String hmKerberosPrincipal,
String rsKerberosPrincipal,
String userPrincipal,
String keytab)
Prepares the Apache HBase configuration object to work with a secure HBase cluster
|
static void |
quietlyClose(org.apache.hadoop.hbase.client.HTable htable)
Quietly close the specified HTable object.
|
static void |
quietlyClose(org.apache.hadoop.hbase.client.HTableInterface htable)
Quietly close the specified HTableInterface object.
|
void |
removeAttributeFromAllEdges(EdgeOpCallback eoc,
boolean skipStoreToCache,
int dop,
ProgressListener pl)
Removes an attribute from all edges based on the specified
EdgeOpCallback . |
void |
removeAttributeFromAllVertices(VertexOpCallback voc,
boolean skipStoreToCache,
int dop,
ProgressListener pl)
Removes an attribute from all vertices based on the specified
VertexOpCallback . |
void |
removeEdge(Edge edge)
Remove the provided edge from the graph
|
void |
removeVertex(Vertex vertex)
Remove the provided vertex from the graph
|
static byte[] |
saltEdge(byte[] ba)
Salts an edge ID represented as a byte array
|
static byte[] |
saltVertex(byte[] ba)
Salts a vertex ID represented as a byte array
|
void |
setCompression(String compress)
Set the type of compression used in the property graph tables.
|
void |
setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe)
Set the data block encoding used in the property graph tables.
|
void |
setEdgeBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the edge table
This is a metadata operation, so changes to this setting take effect only after
clearing the repository and recreating the edge tables.
|
void |
setEdgeInitialNumRegions(int numRegions)
Set the initial number of regions defined for the Apache HBase edge tables associated
to this property graph.
|
void |
setEdgeSndIndexBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the edge secondary table
This is a metadata operation, so changes to this setting take effect only after
clearing the repository and recreating the edge tables.
|
void |
setEdgeSndIndexWriteBufferSize(long writebuffer)
Set the size of the write buffer associated to the edge secondary index table
|
void |
setEdgeWriteBufferSize(long writebuffer)
Set the maximum size of the write buffer associated to the edge table
|
void |
setIndexLuceneWriteBufferSize(long writebuffer)
Set the maximum size of the write buffer associated to the Lucene index table
|
void |
setInitialNumRegions(int numRegions)
Set the initial number of regions defined for the Apache HBase tables associated
to this property graph.
|
void |
setLuceneBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the Lucene index table
This is a metadata operation, so changes to this setting take effect only after
clearing the repository and recreating the Lucene tables.
|
void |
setNumSplitsPerRegion(int iNumSplitsPerRegion)
Set the number of splits per region to use when scanning vertices/edges
|
void |
setScanCachingSize(int iScanCachingSize)
Set the caching size used in Scan operations
|
void |
setVertexBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the vertex table
|
void |
setVertexInitialNumRegions(int numRegions)
Set the initial number of regions defined for the Apache HBase Vertex tables associated
to this property graph.
|
void |
setVertexSndIndexBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the vertex secondary table
This is a metadata operation, so changes to this setting take effect only after
clearing the repository and recreating the vertex tables.
|
void |
setVertexSndIndexWriteBufferSize(long writebuffer)
Set the maximum size of the write buffer associated to the vertex secondary index
table
|
void |
setVertexWriteBufferSize(long writebuffer)
Set the maximum size of the write buffer associated to the vertex table
|
void |
setWriteBufferSize(long writebuffer)
Set the maximum size of the write buffer associated to the graph tables
|
void |
shutdown()
A shutdown function is required to properly close the graph.
|
String |
toString()
Returns a very succinct String representation of this Graph
instance
|
addAttributeToAllEdges, addAttributeToAllEdges, addAttributeToAllVertices, addAttributeToAllVertices, commitIndices, countEdges, countEdges, countVertices, countVertices, createIndex, createKeyIndex, createKeyIndex, createKeyIndex, dropAllAutoIndices, dropAllIndices, dropAllManualIndices, dropIndex, dropKeyIndex, dropKeyIndex, dropKeyIndex, getAutoIndex, getBatchSize, getConfig, getDefaultEdgeOptFlag, getDefaultIndexParameters, getDefaultVertexOptFlag, getEdgeFilterCallback, getEdgePropertyNames, getEdges, getEdges, getEdges, getEdges, getEdges, getEdges, getEdges, getEdges, getIndex, getIndexedKeys, getIndexParameters, getIndices, getInvalidEdgeIDSet, getInvalidEdgeIDSet, getMaxEdgeID, getMaxEdgeID, getMaxVertexID, getMaxVertexID, getMinEdgeID, getMinEdgeID, getMinVertexID, getMinVertexID, getProgressListener, getQueueSize, getShortestPath, getShortestPath, getSimpleDateFormat, getStringForObj, getVertexFilterCallback, getVertexIDSet, getVertexIDSet, getVertexPropertyNames, getVertexPropertyNames, getVertexPropertyNames, getVertices, getVertices, getVertices, getVertices, getVertices, getVertices, getVertices, isEdgeAutoIndexEnabled, isEmpty, isVertexAutoIndexEnabled, query, refreshAutoIndices, removeAttributeFromAllEdges, removeAttributeFromAllEdges, removeAttributeFromAllVertices, removeAttributeFromAllVertices, rollback, serializableToStr, setBatchSize, setDefaultEdgeOptFlag, setDefaultIndexParameters, setDefaultVertexOptFlag, setEdgeFilterCallback, setProgressListener, setQueueSize, setVertexFilterCallback, startTransaction, stopTransaction, strToSerializable, sum
public void addAttributeToAllEdges(EdgeOpCallback eoc, boolean skipStoreToCache, int dop, ProgressListener pl)
Adds an attribute to all edges based on the specified EdgeOpCallback.
Overrides: addAttributeToAllEdges in class OraclePropertyGraphBase
eoc - an EdgeOpCallback object.
skipStoreToCache - if true, the edge instances will not be stored into the cache.
dop - a positive integer defining the number of threads to use when processing the edges in parallel.
pl - a ProgressListener object.
public void addAttributeToAllVertices(VertexOpCallback voc, boolean skipStoreToCache, int dop, ProgressListener pl)
Adds an attribute to all vertices based on the specified VertexOpCallback.
Overrides: addAttributeToAllVertices in class OraclePropertyGraphBase
voc - a VertexOpCallback object.
skipStoreToCache - if true, the vertex instances will not be stored into the cache.
dop - a positive integer defining the number of threads to use when processing the vertices in parallel.
pl - a ProgressListener object.
public Edge addEdge(Object id, Vertex outVertex, Vertex inVertex, String label)
Specified by: addEdge in interface Graph
Overrides: addEdge in class OraclePropertyGraphBase
id - the id of the edge
outVertex - has to be of type OracleVertex
inVertex - has to be of type OracleVertex
label - the edge label
Returns an OracleEdge object.
public Vertex addVertex(Object id)
Specified by: addVertex in interface Graph
Overrides: addVertex in class OraclePropertyGraphBase
id - the id of the vertex
Returns an OracleVertex object.
public void clearCache()
public void clearRepository() throws org.apache.hadoop.hbase.MasterNotRunningException, IOException
Overrides: clearRepository in class OraclePropertyGraphBase
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
IOException
public void commit()
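The mutation methods above follow the Blueprints API; the following is a minimal sketch of adding two vertices and an edge and then committing, assuming opg is an open OraclePropertyGraph and using illustrative IDs and property names:

```java
import com.tinkerpop.blueprints.Edge;
import com.tinkerpop.blueprints.Vertex;

// Hypothetical helper: adds two vertices and one edge, then commits the changes.
static void addSampleData(OraclePropertyGraph opg) {
    Vertex v1 = opg.addVertex(1L);
    v1.setProperty("name", "Alice");        // illustrative property key/value

    Vertex v2 = opg.addVertex(2L);
    v2.setProperty("name", "Bob");

    Edge e = opg.addEdge(100L, v1, v2, "knows");
    e.setProperty("since", 2015);

    opg.commit();                           // persist the pending changes into Apache HBase
}
```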
public static byte[] concatenate(byte[] ba1, byte[] ba2)
public static byte[] concatenate(byte[] ba1, byte[] ba2, byte[] ba3)
public long countEdges(int dop, ProgressListener pl)
Overrides: countEdges in class OraclePropertyGraphBase
dop - a positive integer defining the number of threads to use when parallel scanning the edges.
pl - a ProgressListener object.
public long countVertices(int dop, ProgressListener pl)
Overrides: countVertices in class OraclePropertyGraphBase
dop - a positive integer defining the number of threads to use when parallel scanning the vertices.
pl - a ProgressListener object.
public void dropIndexTable() throws org.apache.hadoop.hbase.MasterNotRunningException, IOException
org.apache.hadoop.hbase.MasterNotRunningException
IOException
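A small sketch of the parallel count methods above, assuming an open OraclePropertyGraph opg; passing null for the ProgressListener is an assumption here, and a listener implementation can be supplied instead to track scan progress:

```java
// Count vertices and edges with a degree of parallelism (DOP) of 4.
int dop = 4;                                        // number of parallel scan threads
long numVertices = opg.countVertices(dop, null);    // null listener is an assumption
long numEdges = opg.countEdges(dop, null);
System.out.println("vertices=" + numVertices + ", edges=" + numEdges);
```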
public void flushHbaseCommits()
public String getCompression()
public org.apache.hadoop.hbase.io.compress.Compression.Algorithm getCompressionAlgorithm(String compress)
public org.apache.hadoop.conf.Configuration getConfiguration()
public org.apache.hadoop.hbase.io.encoding.DataBlockEncoding getDataBlockEncoding()
public OracleEdge getEdge(Object id)
Specified by: getEdge in interface Graph
Overrides: getEdge in class OraclePropertyGraphBase
id - the object identifier of the referenced edge
Returns an OracleEdge object.
public int getEdgeBlockCacheSize()
public int getEdgeInitialNumRegions()
public OracleEdgeBase getEdgeInstance(Long eid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Overrides: getEdgeInstance in class OraclePropertyGraphBase
eid - the id of the edge
bCreateIfAbsentFromCache - if false, returns a NULL value if there is no match in the cache.
bSkipStoreToCache - if true, the edge instance will not be stored into the cache.
public OracleEdgeBase getEdgeInstance(OracleVertexBase outVertex, OracleVertexBase inVertex, String edgeLabel, Long eid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Overrides: getEdgeInstance in class OraclePropertyGraphBase
eid - the id of the edge
outVertex - the outgoing Vertex of the edge
inVertex - the incoming Vertex of the edge
edgeLabel - the label for the Edge.
bCreateIfAbsentFromCache - if false, returns a NULL value if there is no match in the cache.
bSkipStoreToCache - if true, the edge instance will not be stored into the cache.
public int getEdgePropertyNames(int dop, int timeout, Set<String> propertyNames, ProgressListener pl)
Overrides: getEdgePropertyNames in class OraclePropertyGraphBase
dop - a positive integer defining the number of threads to use when parallel scanning the edges
timeout - time out in the unit of seconds
propertyNames - a set of property names to be returned
pl - a ProgressListener object.
public Iterable<Edge> getEdges(String[] keys, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag)
Return an Iterable to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. The information read back from the edges will be complete or partial based on the optimization flag specified.
Overrides: getEdges in class OraclePropertyGraphBase
keys - an array of property key names. It can be NULL which implies all edges in the graph satisfying the edge filter callback will be returned.
efc - an EdgeFilterCallback object specifying the conditions to keep an edge in the Iterable.
flag - an OptimizationFlag object specifying if a partial or complete edge object will be returned. It can be NULL which implies the edges created must be complete.
public Iterable<Edge> getEdges(String key, Object value, Class dtClass, boolean acceptWildcard, boolean preferredLuceneQuery)
Overrides: getEdges in class OraclePropertyGraphBase
key - the name of the property. It MUST not be NULL.
value - the value of the property. It can be NULL which will cause all edges with the given key to be returned.
dtClass - the data type of the value object.
acceptWildcard - specifies if wild cards can be used in the value object. Wild cards can be used only when an automatic text index on the given key is present.
preferredLuceneQuery - if true, a lookup to the automatic index will be executed to find the matches.
public Iterable<Edge>[] getEdgesFromKey(org.apache.hadoop.hbase.client.HConnection[] conns, boolean skipStoreToCache, int startSplitID, String[] keys)
Iterable
objects that hold all the edges in the graph
that have any of the specified keys and satisfy the specified edge filter callback
by executing a lookup over the secondary index table.
The information read back from the edges will be complete or partial based
on the optimization flag specified.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].conns
- an array of connections to Apache HBase.skipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.keys
- an array of property key names. It can be NULL which implies all edges in
the graph satisfying the edge filter callback will be returned.Iterable
array object.public int getEdgeSndIndexBlockCacheSize()
public String getEdgeSndIndexTabName()
public long getEdgeSndIndexWriteBufferSize()
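Before the partitioned-scan overloads below, here is a sketch of the key/value getEdges lookup described above; the property name "since" and its Integer type are illustrative, and with acceptWildcard and preferredLuceneQuery set to false no automatic text index is assumed:

```java
import com.tinkerpop.blueprints.Edge;

// Print all edges whose hypothetical "since" property equals 2015.
static void printEdgesSince(OraclePropertyGraph opg) {
    Iterable<Edge> edges = opg.getEdges("since", Integer.valueOf(2015), Integer.class, false, false);
    for (Edge e : edges) {
        System.out.println(e.getId() + " " + e.getLabel());
    }
}
```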
public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID)
Iterable
objects that hold all the edges in the graph.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, boolean bClonedGraph)
Iterable
objects that hold all the edges in the graph.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.bClonedGraph
- if true, each Iterable in the array will use a new
OraclePropertyGraph instance behind the scenes. If there are
writing operations done over the retrieved edges, the associated
property graphs need to be commited for the changes to be persisted
into Apache HBase.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, EdgeFilterCallback efc)
Iterable
objects that hold all the edges in the graph that
satisfy the specified edge filter callback.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.efc
- a EdgeFilterCallback
object specifying the conditions to keep an
edge in the Iterable
.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag)
Iterable
objects that hold all the edges in the graph that
satisfy the specified edge filter callback. The information read back from
the edges will be complete or partial based on the optimization flag
specified.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.flag
- an OptimizationFlag object specifying if a partial or complete edge object will
be returned. It can be NULL which implies the edges created must be complete.startSplitID
- the ID of the starting split.efc
- a EdgeFilterCallback
object specifying the conditions to keep an
edge in the Iterable
.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, OraclePropertyGraphBase.OptimizationFlag flag)
Iterable
objects that hold all the edges in the graph.
The information read back from the edges will be complete or partial based on the optimization flag
specified.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.flag
- an OptimizationFlag object specifying if a partial or complete edge object will
be returned. It can be NULL which implies the edges created must be complete.startSplitID
- the ID of the starting split.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String key)
Iterable
objects that hold all the edges in the
graph that have a particular key.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.key
- name of the property. It can be NULL which implies all edges in
the graph will be returned.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String[] keys)
Iterable
objects that hold all the edges in the graph
that have any of the specified keys.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.keys
- an array of property key names. It can be NULL which implies all edges in
the graph will be returned.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String[] keys, boolean bClonedGraph, EdgeFilterCallback efc)
Iterable
objects that hold all the edges in the graph
that have any of the specified keys and satisfy the specified edge filter callback.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.efc
- a EdgeFilterCallback
object specifying the conditions to keep an
edge in the Iterable
.keys
- an array of property key names. It can be NULL which implies all edges in
the graph satisfying the edge filter callback will be returned.bClonedGraph
- if true, each Iterable in the array will use a new
OraclePropertyGraph instance behind the scenes. If there are
writing operations done over the retrieved edges, the associated
property graphs need to be commited for the changes to be persisted
into Apache HBase.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String[] keys, boolean bClonedGraph, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag)
Iterable
objects that hold all the edges in the graph
that have any of the specified keys and satisfy the specified edge filter callback.
The information read back from the edges will be complete or partial based
on the optimization flag specified.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.flag
- an OptimizationFlag object specifying if a partial or complete edge object will
be returned. It can be NULL which implies the edges created must be complete.startSplitID
- the ID of the starting split.keys
- an array of property key names. It can be NULL which implies all edges in
the graph satisfying the edge filter callback will be returned.efc
- a EdgeFilterCallback
object specifying the conditions to keep an
edge in the Iterable
.bClonedGraph
- if true, each Iterable in the array will use a new
OraclePropertyGraph instance behind the scenes. If there are
writing operations done over the retrieved edges, the associated
property graphs need to be commited for the changes to be persisted
into Apache HBase.Iterable
array object.public Iterable<Edge>[] getEdgesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String key, boolean bClonedGraph)
Iterable
objects that hold all the edges in the
graph that have a particular key.
Each element in the Iterable
array uses a separate connection
provided to fetch a subset of the results from the corresponding split.
The splits are determined by # of regions & num of splits per region for the
table.
Note that we assign an integer ID (in the range of [0, N - 1]) to all the
splits in the vertex table with N splits. The subset of splits queried will
consist of those splits with ID value in the range of
[startSplitID, startSplit - 1 + size of connections array].connections
- an array of connections to Apache HBase.bSkipStoreToCache
- if true, the edges instances will not be
stored into the cache.startSplitID
- the ID of the starting split.key
- name of the property. It can be NULL which implies all edges in
the graph will be returned.bClonedGraph
- if true, each Iterable in the array will use a new
OraclePropertyGraph instance behind the scenes. If there are
writing operations done over the retrieved edges, the associated
property graphs need to be commited for the changes to be persisted
into Apache HBase.Iterable
array object.public int getEdgeTableSplits() throws IOException
IOException
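The partitioned-scan methods above return one Iterable per HBase connection supplied, so each partition can be consumed independently (for example, on its own thread). A sketch assuming the deprecated HConnectionManager factory from the HBase 1.x client that this API is documented against:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

import com.tinkerpop.blueprints.Edge;

// Sketch: scan the edge table in two partitions starting at split 0.
static void scanEdgePartitions(OraclePropertyGraph opg) throws IOException {
    Configuration conf = opg.getConfiguration();
    HConnection[] conns = new HConnection[2];       // one HBase connection per partition
    for (int i = 0; i < conns.length; i++) {
        conns[i] = HConnectionManager.createConnection(conf);
    }

    Iterable<Edge>[] partitions = opg.getEdgesPartitioned(conns, true /* skip cache */, 0 /* startSplitID */);
    for (Iterable<Edge> partition : partitions) {
        for (Edge e : partition) {                  // each partition could also run on its own thread
            System.out.println(e.getId());
        }
    }

    for (HConnection c : conns) {
        c.close();
    }
}
```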
public String getEdgeTabName()
public long getEdgeWriteBufferSize()
public Features getFeatures()
public String getGraphName()
getGraphName
in class OraclePropertyGraphBase
public static org.apache.hadoop.conf.Configuration getHbaseConfiguration(oracle.pgx.config.PgHbaseGraphConfig config)
config - the PGX PgHbaseGraphConfig object
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf - the Configuration object with all the Apache HBase connection information.
hconn - an HConnection object handling a connection to HBase.
szGraphName - the name of the graph
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
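A sketch of opening a graph through this overload; the ZooKeeper quorum and port are placeholders, and HConnectionManager is assumed to be available in the HBase client release this API targets:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;

// Sketch: open (or create) the property graph "my_graph" on an HBase cluster.
static OraclePropertyGraph openGraph() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "zk-host");              // placeholder ZooKeeper quorum
    conf.set("hbase.zookeeper.property.clientPort", "2181");    // placeholder client port

    HConnection hconn = HConnectionManager.createConnection(conf);
    return OraclePropertyGraph.getInstance(conf, hconn, "my_graph");
}
```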
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, int initialNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphinitialNumRegions
- the initial number of regions used for vertex/edge
tables. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, int initialNumRegions, int splitsPerRegion) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphinitialNumRegions
- the initial number of regions used for vertex/edge
tables. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.splitsPerRegion
- a positive integer specifying the number of
splits per region to use when scanning vertices/edges.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, int initialNumRegions, int splitsPerRegion, int blockCacheSize) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphinitialNumRegions
- the initial number of regions used for vertex/edge
tables. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.splitsPerRegion
- a positive integer specifying the number of
splits per region to use when scanning vertices/edges.blockCacheSize
- the size of the block cache (in bytes) used for
vertex/edge tables. Blocksize to use when writing out storefiles/hfiles
on these tables.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, String compress, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe, int splitsPerRegion, int blockCacheSize, int initialVertexNumRegions, int initialEdgeNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphcompress
- the compression used in the property graph tables. Default value is
set to "SNAPPY"dbe
- the data block encoding used in the property graph tablessplitsPerRegion
- a positive integer specifying the number of
splits per region to use when scanning vertices/edges.blockCacheSize
- the size of the block cache (in bytes) used for
vertex/edge tables. Blocksize to use when writing out storefiles/hfiles
on these tables.initialVertexNumRegions
- the initial number of regions used for vertex
table. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.initialEdgeNumRegions
- the initial number of regions used for edge
table. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, String compress, int initialNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphcompress
- the compression used in the property graph tables. Default value is
set to "SNAPPY"initialNumRegions
- the initial number of regions used for vertex/edge
tables. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, String compress, int initialNumRegions, int splitsPerRegion) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphcompress
- the compression used in the property graph tables. Default value is
set to "SNAPPY"initialNumRegions
- the initial number of regions used for vertex/edge
tables. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.splitsPerRegion
- a positive integer specifying the number of
splits per region to use when scanning vertices/edges.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, String compress, int initialNumRegions, int splitsPerRegion, int blockCacheSize) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphcompress
- the compression used in the property graph tables. Default value is
set to "SNAPPY"initialNumRegions
- the initial number of regions used for vertex/edge
tables. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.splitsPerRegion
- a positive integer specifying the number of
splits per region to use when scanning vertices/edges.blockCacheSize
- the size of the block cache (in bytes) used for
vertex/edge tables. Blocksize to use when writing out storefiles/hfiles
on these tables.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, String compress, int splitsPerRegion, int blockCacheSize, int initialVertexNumRegions, int initialEdgeNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
conf
- the Configuration object with all the Apache HBase connection information.hconn
- an HConnection
object handling a connection to HBase.szGraphName
- the name of the graphcompress
- the compression used in the property graph tables. Default value is
set to "SNAPPY"splitsPerRegion
- a positive integer specifying the number of
splits per region to use when scanning vertices/edges.blockCacheSize
- the size of the block cache (in bytes) used for
vertex/edge tables. Blocksize to use when writing out storefiles/hfiles
on these tables.initialVertexNumRegions
- the initial number of regions used for vertex
table. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.initialEdgeNumRegions
- the initial number of regions used for edge
table. Both vertex and edge tables will be created using the specified
number of regions. If the graph already exists, a clearRepository operation
must be executed in order to truncate the tables and set the new number of regions.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstance(oracle.pgx.config.PgHbaseGraphConfig config) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
config
- a PgHbaseGraphConfig
object.org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public static OraclePropertyGraph getInstanceWithSplits(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.HConnection hconn, String szGraphName, int splitsPerRegion) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - an HConnection object handling a connection to HBase.
szGraphName - the name of the graph.
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
IOException
public Set<Long> getInvalidEdgeIDSet(int dop, ProgressListener pl)
Overrides:
getInvalidEdgeIDSet in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the edges in parallel.
pl - a ProgressListener object.
public int getLuceneBlockCacheSize()
public String getLuceneTabName()
public long getLuceneWriteBufferSize()
public long getMaxEdgeID(int dop, ProgressListener pl)
Overrides:
getMaxEdgeID in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the edges in parallel.
pl - a ProgressListener object.
public long getMaxVertexID(int dop, ProgressListener pl)
Overrides:
getMaxVertexID in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel.
pl - a ProgressListener object.
public long getMinEdgeID(int dop, ProgressListener pl)
Overrides:
getMinEdgeID in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the edges in parallel.
pl - a ProgressListener object.
public long getMinVertexID(int dop, ProgressListener pl)
Overrides:
getMinVertexID in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel.
pl - a ProgressListener object.
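The scan-oriented getters above can be combined into a small diagnostic routine. The sketch below assumes a DOP of 4 and that a null ProgressListener is acceptable when no progress reporting is needed.

```java
import java.util.Set;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class IdScanExample {
  // Prints basic ID statistics for an already-opened graph instance.
  static void printIdStats(OraclePropertyGraph opg) {
    int dop = 4;   // arbitrary degree of parallelism

    long minVid = opg.getMinVertexID(dop, null);
    long maxVid = opg.getMaxVertexID(dop, null);
    long minEid = opg.getMinEdgeID(dop, null);
    long maxEid = opg.getMaxEdgeID(dop, null);

    // IDs of edges flagged as invalid by the graph (see getInvalidEdgeIDSet).
    Set<Long> invalidEdges = opg.getInvalidEdgeIDSet(dop, null);

    System.out.println("vertex IDs:    [" + minVid + ", " + maxVid + "]");
    System.out.println("edge IDs:      [" + minEid + ", " + maxEid + "]");
    System.out.println("invalid edges: " + invalidEdges.size());
  }
}
```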
public int getNumSplitsPerRegion()
public long getOperationQueueSize()
public HBaseIndexManager getOracleIndexManager()
Overrides:
getOracleIndexManager in class OraclePropertyGraphBase
Returns:
an HBaseIndexManager object.
public int getSaltSize()
public int getScanCachingSize()
public int getSecondaryIndexNumBuckets()
public OracleVertex getVertex(Object id)
Specified by:
getVertex in interface Graph
Overrides:
getVertex in class OraclePropertyGraphBase
Parameters:
id - the object identifier of the referenced vertex.
Returns:
a Vertex object.
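A minimal lookup sketch; the vertex ID 1 and the "name" property are assumptions used only for illustration.

```java
import com.tinkerpop.blueprints.Vertex;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class GetVertexExample {
  // Looks up a vertex by ID and reads one of its properties.
  static void printName(OraclePropertyGraph opg) {
    Vertex v = opg.getVertex(Long.valueOf(1L));
    if (v != null) {
      Object name = v.getProperty("name");
      System.out.println("vertex 1 name: " + name);
    }
  }
}
```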
public int getVertexBlockCacheSize()
public Set<Long> getVertexIDSet(int dop, ProgressListener pl)
Overrides:
getVertexIDSet in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel.
pl - a ProgressListener object.
public int getVertexInitialNumRegions()
public OracleVertexBase getVertexInstance(Long vid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Overrides:
getVertexInstance in class OraclePropertyGraphBase
Parameters:
vid - the ID of the vertex.
bCreateIfAbsentFromCache - if false, returns a NULL value if there is no match in the cache.
bSkipStoreToCache - if true, the vertex instance will not be stored into the cache.
public int getVertexPropertyNames(int dop, int timeout, Set<String> propertyNames, ProgressListener pl)
Overrides:
getVertexPropertyNames in class OraclePropertyGraphBase
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel.
timeout - the timeout, in seconds.
propertyNames - a set into which the property names found are returned.
pl - a ProgressListener object.
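Assuming propertyNames acts as an output set that the method fills, a typical call looks like the following sketch; the DOP, the 60-second timeout, and the null ProgressListener are arbitrary choices.

```java
import java.util.HashSet;
import java.util.Set;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class PropertyNamesExample {
  // Collects the distinct vertex property names (keys) found in the graph.
  static Set<String> vertexKeys(OraclePropertyGraph opg) {
    Set<String> names = new HashSet<String>();
    opg.getVertexPropertyNames(4 /* dop */, 60 /* timeout (s) */, names, null);
    return names;
  }
}
```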
public int getVertexSndIndexBlockCacheSize()
public String getVertexSndIndexTabName()
public long getVertexSndIndexWriteBufferSize()
public int getVertexTableSplits() throws IOException
Throws:
IOException
public String getVertexTabName()
public long getVertexWriteBufferSize()
public Iterable<Vertex> getVertices()
Specified by:
getVertices in interface Graph
Overrides:
getVertices in class OraclePropertyGraphBase
public Iterable<Vertex> getVertices(String[] keys, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag)
Returns an Iterable over all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified.
Overrides:
getVertices in class OraclePropertyGraphBase
Parameters:
keys - an array of property key names. It can be NULL, which implies that all vertices in the graph satisfying the vertex filter callback will be returned.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterable.
flag - an OptimizationFlag object specifying whether a partial or complete vertex object will be returned. It can be NULL, which implies that the vertices created must be complete.
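A hedged sketch of the filter-callback variant. The VertexFilterCallback method shown (keepVertex) and the package names are assumptions about the companion API; verify them against your release before use.

```java
import com.tinkerpop.blueprints.Vertex;
import oracle.pg.common.OracleVertexBase;        // assumed packages
import oracle.pg.common.VertexFilterCallback;
import oracle.pg.hbase.OraclePropertyGraph;

public class FilteredVerticesExample {
  // Lists vertices that carry an "age" property and pass a simple filter.
  static void listAdults(OraclePropertyGraph opg) {
    VertexFilterCallback vfc = new VertexFilterCallback() {
      @Override
      public boolean keepVertex(OracleVertexBase v) {   // assumed method name
        Object age = v.getProperty("age");
        return age instanceof Integer && ((Integer) age) >= 18;
      }
    };

    // NULL flag: complete vertex objects are returned.
    Iterable<Vertex> it = opg.getVertices(new String[] { "age" }, vfc, null);
    for (Vertex v : it) {
      System.out.println(v.getId() + ": " + v.getProperty("name"));
    }
  }
}
```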
public Iterable<Vertex> getVertices(String key, Object value, Class dtClass, boolean acceptWildcard, boolean preferredLuceneQuery)
Overrides:
getVertices in class OraclePropertyGraphBase
Parameters:
key - the name of the property. It must not be NULL.
value - the value of the property. It can be NULL, which will cause all vertices with the given key to be returned.
dtClass - the datatype class of the value object.
acceptWildcard - specifies whether wildcards can be used in the value object. Wildcards can be used only when an automatic text index on the given key is present.
preferredLuceneQuery - if true, a lookup against the automatic text index will be executed to find the matches.
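A sketch of a wildcard lookup, assuming an automatic text index already exists on the "name" key; without such an index, wildcards are not accepted.

```java
import com.tinkerpop.blueprints.Vertex;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class WildcardLookupExample {
  // Finds vertices whose "name" starts with "Al" using a wildcard match.
  static void findByNamePrefix(OraclePropertyGraph opg) {
    Iterable<Vertex> it = opg.getVertices(
        "name", "Al*", String.class,
        true  /* acceptWildcard */,
        true  /* preferredLuceneQuery */);
    for (Vertex v : it) {
      System.out.println(v.getId() + ": " + v.getProperty("name"));
    }
  }
}
```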
public Iterable<Vertex>[] getVerticesFromKey(org.apache.hadoop.hbase.client.HConnection[] conns, boolean skipStoreToCache, int startSplitID, String[] keys)
Returns an array of Iterable objects that hold all the vertices in the graph that have any of the specified keys, by executing a lookup over the secondary index table. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
conns - an array of connections to Apache HBase.
skipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies that all vertices in the graph will be returned.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID)
Returns an array of Iterable objects that hold all the vertices in the graph. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
Returns:
an Iterable array object.
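A sketch of a partitioned scan over two connections. The connection count, starting split of 0, and sequential draining of the Iterables are illustrative choices; each Iterable could instead be consumed by its own thread.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import com.tinkerpop.blueprints.Vertex;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class PartitionedScanExample {
  // Splits the vertex scan across two HBase connections, starting at split 0.
  static long countVerticesPartitioned(OraclePropertyGraph opg, Configuration conf)
      throws Exception {
    HConnection[] conns = new HConnection[] {
        HConnectionManager.createConnection(conf),
        HConnectionManager.createConnection(conf)
    };
    long count = 0;
    try {
      Iterable<Vertex>[] parts = opg.getVerticesPartitioned(conns, true, 0);
      for (Iterable<Vertex> part : parts) {
        for (Vertex v : part) {
          count++;
        }
      }
    } finally {
      for (HConnection c : conns) {
        c.close();
      }
    }
    return count;
  }
}
```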
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, boolean bClonedGraph)
Returns an array of Iterable objects that hold all the vertices in the graph. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
bClonedGraph - if true, each Iterable in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs must be committed for the changes to be persisted into Apache HBase.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, OraclePropertyGraphBase.OptimizationFlag flag)
Returns an array of Iterable objects that hold all the vertices in the graph. The information read back from the vertices will be complete or partial based on the optimization flag specified. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
flag - an OptimizationFlag object specifying whether a partial or complete vertex object will be returned. It can be NULL, which implies that the vertices created must be complete.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String key)
Returns an array of Iterable objects that hold all the vertices in the graph that have a particular key. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
key - the name of the property. It can be NULL, which implies that all vertices in the graph will be returned.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String[] keys)
Returns an array of Iterable objects that hold all the vertices in the graph that have any of the specified keys. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies that all vertices in the graph will be returned.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String[] keys, boolean bCloneGraph, VertexFilterCallback vfc)
Returns an array of Iterable objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies that all vertices in the graph satisfying the vertex filter callback will be returned.
bCloneGraph - if true, each Iterable in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs must be committed for the changes to be persisted into Apache HBase.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterable.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String[] keys, boolean bCloneGraph, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag)
Returns an array of Iterable objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies that all vertices in the graph satisfying the vertex filter callback will be returned.
bCloneGraph - if true, each Iterable in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs must be committed for the changes to be persisted into Apache HBase.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterable.
flag - an OptimizationFlag object specifying whether a partial or complete vertex object will be returned. It can be NULL, which implies that the vertices created must be complete.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, String key, boolean bCloneGraph)
Returns an array of Iterable objects that hold all the vertices in the graph that have a particular key. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
key - the name of the property. It can be NULL, which implies that all vertices in the graph will be returned.
bCloneGraph - if true, each Iterable in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs must be committed for the changes to be persisted into Apache HBase.
Returns:
an Iterable array object.
public Iterable<Vertex>[] getVerticesPartitioned(org.apache.hadoop.hbase.client.HConnection[] connections, boolean bSkipStoreToCache, int startSplitID, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag)
Returns an array of Iterable objects that hold all the vertices in the graph that satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified. Each element in the Iterable array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID (in the range [0, N - 1]) is assigned to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID values in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterable.
flag - an OptimizationFlag object specifying whether a partial or complete vertex object will be returned. It can be NULL, which implies that the vertices created must be complete.
Returns:
an Iterable array object.
public long getWriteBufferSize()
public static org.apache.hadoop.conf.Configuration prepareSecureConfig(org.apache.hadoop.conf.Configuration config, String hbaseSecAuth, String hadoopSecAuth, String hmKerberosPrincipal, String rsKerberosPrincipal, String userPrincipal, String keytab)
Parameters:
config - the Apache HBase configuration object.
hbaseSecAuth - the HBase authentication mode ("kerberos" or "simple").
hadoopSecAuth - the Hadoop authentication mode ("kerberos" or "simple").
hmKerberosPrincipal - the Kerberos principal name that should be used to run the HMaster process. The principal name should be in the form user/hostname@DOMAIN. If "_HOST" is used as the hostname portion, it will be replaced with the actual hostname of the running instance.
rsKerberosPrincipal - the Kerberos principal name that should be used to run the HRegionServer process, for example "hbase/_HOST@EXAMPLE.COM". The principal name should be in the form user/hostname@DOMAIN. If "_HOST" is used as the hostname portion, it will be replaced with the actual hostname of the running instance.
userPrincipal - the user principal to log in as.
keytab - the keytab file containing the credentials of the login user.
Returns:
a Configuration object.
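A hedged sketch of preparing a secure configuration before opening a graph; every principal, realm, and path below is a placeholder.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class SecureConfigExample {
  public static void main(String[] args) throws Exception {
    // All principals, the EXAMPLE.COM realm, and the keytab path are placeholders.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "zk-host");

    conf = OraclePropertyGraph.prepareSecureConfig(
        conf,
        "kerberos",                    // hbaseSecAuth
        "kerberos",                    // hadoopSecAuth
        "hbase/_HOST@EXAMPLE.COM",     // hmKerberosPrincipal
        "hbase/_HOST@EXAMPLE.COM",     // rsKerberosPrincipal
        "opg@EXAMPLE.COM",             // userPrincipal
        "/home/opg/opg.keytab");       // keytab

    HConnection hconn = HConnectionManager.createConnection(conf);
    OraclePropertyGraph opg =
        OraclePropertyGraph.getInstanceWithSplits(conf, hconn, "my_graph", 1);
    // ... use the graph ...
    opg.shutdown();
    hconn.close();
  }
}
```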
public static void quietlyClose(org.apache.hadoop.hbase.client.HTable htable)
Parameters:
htable - the HTable object to close.
public static void quietlyClose(org.apache.hadoop.hbase.client.HTableInterface htable)
Parameters:
htable - the HTableInterface object to close.
public void removeAttributeFromAllEdges(EdgeOpCallback eoc, boolean skipStoreToCache, int dop, ProgressListener pl)
Removes an attribute from all edges based on the specified EdgeOpCallback.
Overrides:
removeAttributeFromAllEdges in class OraclePropertyGraphBase
Parameters:
eoc - an EdgeOpCallback object.
skipStoreToCache - if true, the edge instances will not be stored into the cache.
dop - a positive integer defining the number of threads to use when processing the edges in parallel.
pl - a ProgressListener object.
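A hedged sketch of removing a property from selected edges. The EdgeOpCallback methods used here (needOp, getAttributeKeyName, getAttributeKeyValue) are assumptions based on the companion addAttributeToAllEdges API; verify them against your release.

```java
import oracle.pg.common.EdgeOpCallback;       // assumed packages
import oracle.pg.common.OracleEdgeBase;
import oracle.pg.hbase.OraclePropertyGraph;

public class RemoveEdgeAttributeExample {
  // Removes the "weight" property from every edge labeled "knows".
  static void dropWeights(OraclePropertyGraph opg) {
    EdgeOpCallback eoc = new EdgeOpCallback() {
      public boolean needOp(OracleEdgeBase e) {          // assumed method names
        return "knows".equals(e.getLabel());
      }
      public String getAttributeKeyName(OracleEdgeBase e) {
        return "weight";
      }
      public Object getAttributeKeyValue(OracleEdgeBase e) {
        return null;   // not used for removal
      }
    };
    opg.removeAttributeFromAllEdges(eoc, true /* skipStoreToCache */, 4 /* dop */, null);
    opg.commit();
  }
}
```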
public void removeAttributeFromAllVertices(VertexOpCallback voc, boolean skipStoreToCache, int dop, ProgressListener pl)
Removes an attribute from all vertices based on the specified VertexOpCallback.
Overrides:
removeAttributeFromAllVertices in class OraclePropertyGraphBase
Parameters:
voc - a VertexOpCallback object.
skipStoreToCache - if true, the vertex instances will not be stored into the cache.
dop - a positive integer defining the number of threads to use when processing the vertices in parallel.
pl - a ProgressListener object.
public void removeEdge(Edge edge)
Specified by:
removeEdge in interface Graph
Overrides:
removeEdge in class OraclePropertyGraphBase
Parameters:
edge - an Edge object to be removed.
public void removeVertex(Vertex vertex)
Parameters:
vertex - a Vertex object to be removed.
public static byte[] saltEdge(byte[] ba)
Parameters:
ba - the edge ID represented as a byte array.
public static byte[] saltVertex(byte[] ba)
Parameters:
ba - the vertex ID represented as a byte array.
public void setCompression(String compress)
Parameters:
compress - the compression used in the property graph tables.
public void setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe)
Parameters:
dbe - the data block encoding used in the property graph tables.
public void setEdgeBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the edge table. The default value is 131072 bytes.
public void setEdgeInitialNumRegions(int numRegions)
Parameters:
numRegions - a positive integer with the initial number of regions for the edge table. The default value is 4.
public void setEdgeSndIndexBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the edge secondary index table. The default value is 131072 bytes.
public void setEdgeSndIndexWriteBufferSize(long writebuffer)
Parameters:
writebuffer - the maximum size of the write buffer used in the edge secondary index table.
public void setEdgeWriteBufferSize(long writebuffer)
Parameters:
writebuffer - the maximum size of the write buffer used in the edge table.
public void setIndexLuceneWriteBufferSize(long writebuffer)
Parameters:
writebuffer - the maximum size of the write buffer used in the text (Lucene) index table.
public void setInitialNumRegions(int numRegions)
Parameters:
numRegions - the initial number of regions for the vertex/edge tables.
public void setLuceneBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the text (Lucene) index table. The default value is 131072 bytes.
public void setNumSplitsPerRegion(int iNumSplitsPerRegion)
Parameters:
iNumSplitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges. The default value is 1.
public void setScanCachingSize(int iScanCachingSize)
Parameters:
iScanCachingSize - the scan caching size to use.
public void setVertexBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the vertex table. This is a metadata operation, so changes to this setting take effect only after clearing the repository and recreating the vertex tables. The default value is 131072 bytes.
public void setVertexInitialNumRegions(int numRegions)
Parameters:
numRegions - a positive integer with the initial number of regions for the vertex table. The default value is 4.
public void setVertexSndIndexBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the vertex secondary index table. The default value is 131072 bytes.
public void setVertexSndIndexWriteBufferSize(long writebuffer)
Parameters:
writebuffer - the maximum size of the write buffer used in the vertex secondary index table.
public void setVertexWriteBufferSize(long writebuffer)
Parameters:
writebuffer - the maximum size of the write buffer used in the vertex table.
public void setWriteBufferSize(long writebuffer)
Parameters:
writebuffer - the maximum size of the write buffer used in the property graph tables.
public void shutdown()
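A tuning sketch that strings several of these setters together; the numeric values are arbitrary, and clearRepository is invoked only because settings such as the initial number of regions and the block cache sizes apply when the tables are recreated.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import oracle.pg.hbase.OraclePropertyGraph;   // assumed package

public class TuningExample {
  public static void main(String[] args) throws Exception {
    // Illustrative values only; adjust regions, splits, and cache sizes to your data.
    Configuration conf = HBaseConfiguration.create();
    HConnection hconn = HConnectionManager.createConnection(conf);
    OraclePropertyGraph opg =
        OraclePropertyGraph.getInstanceWithSplits(conf, hconn, "my_graph", 2);

    opg.setCompression("SNAPPY");
    opg.setVertexInitialNumRegions(8);
    opg.setEdgeInitialNumRegions(8);
    opg.setNumSplitsPerRegion(2);
    opg.setVertexBlockCacheSize(262144);
    opg.setEdgeBlockCacheSize(262144);

    // Recreate the tables so the new region/block settings take effect.
    opg.clearRepository();

    opg.shutdown();
    hconn.close();
  }
}
```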
Copyright © 2016 Oracle and/or its affiliates. All Rights Reserved.