public class OraclePropertyGraph extends OraclePropertyGraphBase
Nested classes/interfaces inherited from class OraclePropertyGraphBase:
OraclePropertyGraphBase.CacheStatus, OraclePropertyGraphBase.OptimizationFlag

Nested classes/interfaces inherited from interface org.apache.tinkerpop.gremlin.structure.Graph:
org.apache.tinkerpop.gremlin.structure.Graph.Exceptions, org.apache.tinkerpop.gremlin.structure.Graph.Features, org.apache.tinkerpop.gremlin.structure.Graph.Hidden, org.apache.tinkerpop.gremlin.structure.Graph.OptIn, org.apache.tinkerpop.gremlin.structure.Graph.OptIns, org.apache.tinkerpop.gremlin.structure.Graph.OptOut, org.apache.tinkerpop.gremlin.structure.Graph.OptOuts, org.apache.tinkerpop.gremlin.structure.Graph.Variables

Nested classes/interfaces inherited from interface OraclePropertyGraphConstants:
OraclePropertyGraphConstants.QueueAction

Inherited message constants:
MG_ERR_USER_REQ_OP_CANCEL, MSG_E_AUTO_IDX_MUST_SOLR_OR_LUCENE, MSG_E_AUTO_IDX_NOT_EXIST, MSG_ERR_ARG_CANNOT_BE_NULL, MSG_ERR_ID_CANNOT_BE_NULL, MSG_ERR_NO_ELEM_TO_BE_CONSUMED, MSG_ERR_NO_OP_SUPPORTED, MSG_IDX_KEY_NOT_NULL, MSG_IDX_KEYS_NOT_NULL, MSG_TXT_IDX_NOT_FOUND, MSG_V_AUTO_IDX_MUST_SOLR_OR_LUCENE, MSG_V_AUTO_IDX_NOT_EXIST

Inherited error and info codes:
ERR_ARG_CANNOT_BE_NULL, ERR_CODE_NOT_IN_LIST, ERR_DOP_CONSTRAINT, ERR_E_AUTO_IDX_MUST_SOLR_OR_LUCENE, ERR_E_AUTO_IDX_NOT_EXIST, ERR_E_DS_MUSTBE_FILE_OR_IS, ERR_E_MAX_LINE_CONSTRAINT, ERR_EDGE_DIR_NOT_BOTH, ERR_EDGE_WORK_TBL_NOT_CLEAN, ERR_GRAPH_NAME_CANT_BENULL, ERR_HIT_EXCEPTION, ERR_HIT_INTERRUPT, ERR_HIT_IO, ERR_HIT_OPGE, ERR_HIT_PARSEE, ERR_ID_CANNOT_BE_NULL, ERR_IDX_KEY_CANNOT_BE_NULL, ERR_IDX_KEY_NOT_NULL, ERR_IDX_KEYS_NOT_NULL, ERR_IN_DS_EDGE_CANTBE_NULL, ERR_IN_DS_VERT_CANTBE_NULL, ERR_IN_E_FLAT_DOESNT_EXIST, ERR_IN_V_FLAT_DOESNT_EXIST, ERR_INPUT_LIST_NOT_NULL, ERR_INPUT_NOT_NULL, ERR_INVALID_CHANGE_ACTION, ERR_INVALID_NUM_FIELDS, ERR_INVALID_NUM_NOARG, ERR_INVALID_NUMBER, ERR_INVALID_VAL, ERR_KEY_CANNOT_BE_ID, ERR_KEY_CANNOT_BE_NULL, ERR_KEY_CANNOT_BE_NULL_EMPTY, ERR_KEY_NOT_SET_TO, ERR_NO_ELEM_TO_BE_CONSUMED, ERR_NO_OP_SUPPORTED, ERR_NOT_IMPLEMENTED_YET, ERR_NOT_ORACLE_V_E, ERR_NUM_PART_CONSTRAINT, ERR_OFFSET_CONSTRAINT, ERR_OFFSET_E_MUST_BE_POS, ERR_OFFSET_V_MUST_BE_POS, ERR_OPT_FLAG_EDGE_INVALID, ERR_OPT_FLAG_VERT_INVALID, ERR_ORA_TEXT_INVALID_KEY, ERR_ORA_TEXT_ONLY_AUTO, ERR_OUTPUT_LIST_NOT_NULL, ERR_PG_ALREADY_EXISTS, ERR_PG_NOT_EMPTY, ERR_PG_NOT_NULL, ERR_SIZE_MUST_BE_POS, ERR_TIMEOUT_TAB_CREATION, ERR_TXT_IDX_NOT_FOUND, ERR_TYPE_ID_NOT_RECO, ERR_UNSUPPORTED_VAL_TYP, ERR_USER_REQ_OP_CANCEL, ERR_V_AUTO_IDX_MUST_SOLR_OR_LUCENE, ERR_V_AUTO_IDX_NOT_EXIST, ERR_V_DS_MUSTBE_FILE_OR_IS, ERR_V_MAX_LINE_CONSTRAINT, ERR_VALUE_CANNOT_BE_NULL, INFO_EMP_K_NO_P_VAL, INFO_NUL_K_NO_P_VAL

Inherited CSV data type keywords and formats:
CSV_DATE_TIME_FORMATS, CSV_DT_KEYWORD_BOOLEAN, CSV_DT_KEYWORD_BYTE, CSV_DT_KEYWORD_CHAR, CSV_DT_KEYWORD_DATE, CSV_DT_KEYWORD_DOUBLE, CSV_DT_KEYWORD_FLOAT, CSV_DT_KEYWORD_INTEGER, CSV_DT_KEYWORD_JSON, CSV_DT_KEYWORD_LONG, CSV_DT_KEYWORD_RDF, CSV_DT_KEYWORD_SHORT, CSV_DT_KEYWORD_SPATIAL, CSV_DT_KEYWORD_STRING, OPG_DATETIME_FORMAT

Inherited property data type codes:
TYPE_DT_BOOL, TYPE_DT_BYTE, TYPE_DT_CHAR, TYPE_DT_DATE, TYPE_DT_DOUBLE, TYPE_DT_EMPTY, TYPE_DT_FLOAT, TYPE_DT_INTEGER, TYPE_DT_JSON, TYPE_DT_LONG, TYPE_DT_SERI, TYPE_DT_SHORT, TYPE_DT_SPATIAL, TYPE_DT_STRING, TYPE_DT_URI

| Modifier and Type | Method and Description |
|---|---|
org.apache.tinkerpop.gremlin.structure.Edge |
addEdge(java.lang.Object id, org.apache.tinkerpop.gremlin.structure.Vertex outVertex, org.apache.tinkerpop.gremlin.structure.Vertex inVertex, java.lang.String label)
This method adds an edge to the graph instance.
|
org.apache.tinkerpop.gremlin.structure.Vertex |
addVertexById(java.lang.Object id)
This method adds a vertex to the graph instance.
|
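
For reference, a minimal usage sketch of addVertexById, addEdge and commit follows. The oracle.pg.hbase package name, the opg variable (an already-opened OraclePropertyGraph) and all IDs and property values are assumptions for illustration only.

```java
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Vertex;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class AddElementsSketch {
  // opg is assumed to be an already-opened OraclePropertyGraph instance
  static void addSampleData(OraclePropertyGraph opg) {
    // addVertexById returns OracleVertex instances, so they are valid
    // outVertex/inVertex arguments for addEdge
    Vertex v1 = opg.addVertexById(1L);
    Vertex v2 = opg.addVertexById(2L);
    v1.property("name", "Alice");
    v2.property("name", "Bob");

    Edge e = opg.addEdge(100L, v1, v2, "knows"); // edge ID, out vertex, in vertex, label
    e.property("since", 2020);

    opg.commit(); // persist the pending changes
  }
}
```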
boolean |
areSecondaryIndicesForAdjacencyVerticesEnabled()
Returns true if secondary indices for adjacency vertices are enabled
|
boolean |
areSecondaryTablesForPropertiesEnabled()
Returns true if secondary tables for vertex/edge properties are enabled
|
void |
clearRepository()
This method removes all vertices and edges from this property graph instance.
|
void |
clearRepository(boolean removeUsingIterators)
This method removes all vertices and edges from this property graph instance.
|
void |
close()
A shutdown function is required to properly close the graph.
|
void |
commit()
Commit changes done to this property graph instance.
|
static byte[] |
concatenate(byte[] ba1, byte[] ba2)
This method returns a byte array that concatenates the two given byte arrays. If one of the arguments is null, the return value is the other argument.
|
static byte[] |
concatenate(byte[] ba1, byte[] ba2, byte[] ba3)
This method returns a byte array that concatenates the three given byte arrays. If one of the arguments is null, it is omitted from the result.
|
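
A short sketch of the static concatenate helpers follows; the values are illustrative, the oracle.pg.hbase package name is assumed, and the null-handling comment only restates the descriptions above.

```java
import java.nio.ByteBuffer;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class ConcatenateSketch {
  public static void main(String[] args) {
    byte[] prefix = new byte[] { 0x01 };
    byte[] id = ByteBuffer.allocate(8).putLong(42L).array();

    // Two-argument form: {0x01} followed by the 8-byte ID
    byte[] key = OraclePropertyGraph.concatenate(prefix, id);

    // Three-argument form; per the description above, a null argument
    // contributes nothing to the result
    byte[] key2 = OraclePropertyGraph.concatenate(prefix, id, null);

    System.out.println(key.length + " vs " + key2.length);
  }
}
```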
long |
countEdges(int dop, ProgressListener pl)
Counts all edges using parallel scans and the specified DOP.
|
long |
countVertices(int dop, ProgressListener pl)
Counts all vertices using parallel scans and the specified DOP.
|
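
A minimal sketch of the parallel counters, assuming opg is an open OraclePropertyGraph (oracle.pg.hbase package assumed) and that passing a null ProgressListener is acceptable; supply a real listener to track progress.

```java
import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class CountSketch {
  static void printCounts(OraclePropertyGraph opg) {
    int dop = 8; // number of parallel scan threads
    long numVertices = opg.countVertices(dop, null); // null ProgressListener assumed OK
    long numEdges = opg.countEdges(dop, null);
    System.out.println(numVertices + " vertices, " + numEdges + " edges");
  }
}
```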
void |
disableSecondaryIndicesForAdjacencyVertices()
Disables the use of secondary indices for adjacency vertices
|
void |
disableSecondaryTablesForProperties()
Disables the use of secondary tables for vertex/edge properties
|
void |
dispose()
Closes the graph without committing any changes
|
void |
dropIndexTable()
This method removes all indices meta data from this property graph instance.
|
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> |
edgesByProperty(java.lang.String key, java.lang.Object value, java.lang.Class dtClass, boolean acceptWildcard, boolean preferredLuceneQuery)
Return an iterator to all matching edges that have a particular key/value property.
|
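
A sketch of a key/value edge lookup follows; the property name "weight", its value, and the opg variable are illustrative assumptions.

```java
import java.util.Iterator;

import org.apache.tinkerpop.gremlin.structure.Edge;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class EdgesByPropertySketch {
  static void printWeightOneEdges(OraclePropertyGraph opg) {
    // Exact match on weight = 1.0: no wildcards, no Lucene-backed lookup
    Iterator<Edge> it = opg.edgesByProperty("weight", 1.0d, Double.class,
                                            false /* acceptWildcard */,
                                            false /* preferredLuceneQuery */);
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}
```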
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> |
edgesByPropertyKey(boolean flushUpdates) |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> |
edgesByPropertyKey(java.lang.String[] keys, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag)
Return an
Iterator to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> |
edgesByPropertyKey(java.lang.String[] keys, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag, int dop)
Return an
Iterator to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> |
edgesByPropertyKey(java.lang.String[] keys, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag, int dop, boolean flushUpdates)
Return an
Iterator to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesFromPropertyKey(org.apache.hadoop.hbase.client.Connection[] conns, boolean skipStoreToCache, int startSplitID, java.lang.String[] keys)
Gets an array of
Iterator objects that hold all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback by executing a lookup over the secondary index table. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID)
Gets an array of
Iterator objects that hold all the edges in the graph. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, boolean bClonedGraph)
Gets an array of
Iterator objects that hold all the edges in the graph. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, EdgeFilterCallback efc)
Gets an array of
Iterator objects that hold all the edges in the graph that satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterator objects that hold all the edges in the graph that satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterator objects that hold all the edges in the graph. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String key)
Gets an array of
Iterator objects that hold all the edges in the graph that have a particular key. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String[] keys)
Gets an array of
Iterator objects that hold all the edges in the graph that have any of the specified keys. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String[] keys, boolean bClonedGraph, EdgeFilterCallback efc)
Gets an array of
Iterator objects that hold all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String[] keys, boolean bClonedGraph, EdgeFilterCallback efc, OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterator objects that hold all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] |
edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String key, boolean bClonedGraph)
Gets an array of
Iterator objects that hold all the edges in the graph that have a particular key. |
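
The sketch below illustrates the partitioned-scan pattern these edgesPartitioned overloads share: one HBase Connection per iterator and split IDs starting at 0. The worker count, the opg variable and the processing logic are placeholders.

```java
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.tinkerpop.gremlin.structure.Edge;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class EdgesPartitionedSketch {
  static void scanEdges(OraclePropertyGraph opg, Configuration conf) throws Exception {
    int numWorkers = 4; // illustrative number of connections/iterators
    Connection[] conns = new Connection[numWorkers];
    for (int i = 0; i < numWorkers; i++) {
      conns[i] = ConnectionFactory.createConnection(conf);
    }
    // One iterator per connection, starting at split 0; skip the element
    // cache so a full scan does not grow the cache
    Iterator<Edge>[] parts = opg.edgesPartitioned(conns, true, 0);
    for (Iterator<Edge> part : parts) {
      while (part.hasNext()) {
        part.next(); // process the edge here
      }
    }
    for (Connection c : conns) {
      c.close();
    }
  }
}
```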
void |
enableSecondaryIndicesForAdjacencyVertices()
Enables the use of secondary indices for adjacency vertices
|
void |
enableSecondaryTablesForProperties()
Enables the use of secondary tables for vertex/edge properties
|
org.apache.tinkerpop.gremlin.structure.Graph.Features |
features()
Gets the features of this graph implementation for TinkerPop 3.x
|
void |
flushHbaseCommits()
Flush HBase tables so property graph changes are committed into Apache HBase.
|
void |
flushUpdates()
Flushes updates into the pertinent HBase tables.
|
java.lang.String |
getCompression()
Get the type of compression used in the property graph tables
|
org.apache.hadoop.hbase.io.compress.Compression.Algorithm |
getCompressionAlgorithm(java.lang.String compress)
A utility to get a Compression.Algorithm based on the specified string
|
org.apache.hadoop.conf.Configuration |
getConfiguration()
This method returns the underlying Configuration instance used for HBase
|
org.apache.hadoop.hbase.client.Connection |
getConnection()
Gets the underlying connection for this property graph
|
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding |
getDataBlockEncoding()
Gets the data block encoding algorithm used in block cache and optionally on disk for all property graph tables
|
OracleEdge |
getEdge(java.lang.Object id)
Return the edge referenced by the object identifier.
|
int |
getEdgeBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the edge table
|
int |
getEdgeInitialNumRegions()
Get the initial number of regions defined for the Apache HBase Edge tables associated to this property graph.
|
OracleEdgeBase |
getEdgeInstance(java.lang.Long eid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Returns an instance of OracleEdgeBase.
|
OracleEdgeBase |
getEdgeInstance(OracleVertexBase outVertex, OracleVertexBase inVertex, java.lang.String edgeLabel, java.lang.Long eid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Returns an instance of OracleEdgeBase.
|
int |
getEdgePropertyNames(int dop, int timeout, java.util.Map<java.lang.String,java.lang.Class> propertyNames, ProgressListener pl)
Gets the property names of all edges using parallel scans and the specified DOP.
|
int |
getEdgePropertyNames(int dop, int timeout, java.util.Set<java.lang.String> propertyNames, ProgressListener pl)
Gets the property names of all edges using parallel scans and the specified DOP.
|
int |
getEdgeSndIndexBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the secondary index edge table
|
org.apache.hadoop.hbase.TableName |
getEdgeSndIndexTabName()
Get the name of the Apache HBase table used to store secondary indexes on edges in this property graph.
|
java.util.Set<org.apache.hadoop.hbase.client.RegionInfo> |
getEdgeTableRegionsInfo()
Gets the information of all HBase regions of the edge table
|
int |
getEdgeTableSplits()
Get the number of Edge table splits based on the current number of splits per region associated to this graph and the number of regions in the edge table.
|
org.apache.hadoop.hbase.TableName |
getEdgeTabName()
Get the name of the Apache HBase table used to store edges in this property graph.
|
static org.apache.hadoop.conf.Configuration |
getHbaseConfiguration(PgHbaseGraphConfig config)
Gets a HBase configuration from a PgHbaseGraphConfig object
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int initialNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int initialNumRegions, int splitsPerRegion)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int initialNumRegions, int splitsPerRegion, int blockCacheSize)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe, int splitsPerRegion, int blockCacheSize, int initialVertexNumRegions, int initialEdgeNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int initialNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int initialNumRegions, int splitsPerRegion)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int initialNumRegions, int splitsPerRegion, int blockCacheSize)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int splitsPerRegion, int blockCacheSize, int initialVertexNumRegions, int initialEdgeNumRegions)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided.
|
static OraclePropertyGraph |
getInstance(PgHbaseGraphConfig config)
Returns an instance of OraclePropertyGraph using a PGX PgHbaseGraphConfig object.
|
static OraclePropertyGraph |
getInstance(PgHbaseGraphConfig config, long maxRowSize)
Returns an instance of OraclePropertyGraph using a PGX PgHbaseGraphConfig object.
|
static OraclePropertyGraph |
getInstanceWithSplits(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int splitsPerRegion)
Returns an instance of OraclePropertyGraph using the Apache HBase connection and graph name provided and the specified number of splits per region.
|
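
A minimal sketch of opening a graph over HBase with the simplest getInstance overload. The oracle.pg.hbase package name, the ZooKeeper hosts and the graph name "connections" are assumptions; adjust them to your deployment.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class OpenGraphSketch {
  static OraclePropertyGraph openGraph() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "zk1,zk2,zk3"); // placeholder hosts

    Connection hconn = ConnectionFactory.createConnection(conf);

    // Simplest overload: configuration, connection, graph name. The other
    // overloads additionally fix initial regions, splits per region,
    // compression and block cache size at creation time.
    return OraclePropertyGraph.getInstance(conf, hconn, "connections");
  }
}
```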
java.util.Set<java.lang.Long> |
getInvalidEdgeIDSet(int dop, ProgressListener pl)
Gets the set of IDs of invalid edges stored in the property graph.
|
long |
getMaxEdgeID(int dop, ProgressListener pl)
Get the maximum edge ID using parallel scan and the specified DOP.
|
long |
getMaxVertexID(int dop, ProgressListener pl)
Get maximum vertex ID using parallel scans and the specified DOP.
|
long |
getMinEdgeID(int dop, ProgressListener pl)
Get the minimum edge ID using parallel scans and the specified DOP.
|
long |
getMinVertexID(int dop, ProgressListener pl)
Get minimum vertex ID using parallel scans and the specified DOP.
|
int |
getNumSplitsPerRegion()
Get the number of splits per region to use when scanning vertices/edges
|
long |
getOperationQueueSize()
Get the size of the operation queue
|
OracleIndexManager |
getOracleIndexManager()
Gets the OracleIndexManager object associated to this property graph instance.
|
static int |
getSaltSize()
Gets the salt size
|
int |
getScanCachingSize()
Get the caching size used in Scan operations
|
int |
getSecondaryIndexNumBuckets()
Return the number of buckets used to store secondary index data into the vertex/edge secondary index tables
|
org.apache.hadoop.hbase.TableName |
getTextIdxMetadataTabName()
Get the name of the Apache HBase table used to store index metadata in this property graph.
|
OracleVertex |
getVertex(java.lang.Object id)
Return the vertex referenced by the object identifier.
|
int |
getVertexBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the vertex table
|
java.util.Set<java.lang.Long> |
getVertexIDSet(int dop, ProgressListener pl)
Gets a set of Vertex IDs that are stored in the property graph
|
int |
getVertexInitialNumRegions()
Get the initial number of regions defined for the Apache HBase Vertex tables associated to this property graph.
|
OracleVertexBase |
getVertexInstance(java.lang.Long vid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Returns an instance of OracleVertexBase.
|
int |
getVertexPropertyNames(int dop, int timeout, java.util.Map<java.lang.String,java.lang.Class> propertyNames, ProgressListener pl)
Gets the property names of all vertices using parallel scans and the specified DOP.
|
int |
getVertexPropertyNames(int dop, int timeout, java.util.Set<java.lang.String> propertyNames, ProgressListener pl)
Gets the property names of all vertices using parallel scans and the specified DOP.
|
int |
getVertexSndIndexBlockCacheSize()
Gets the size of the block cache (in bytes) associated to the secondary index vertex table
|
org.apache.hadoop.hbase.TableName |
getVertexSndIndexTabName()
Get the name of the Apache HBase table used to store secondary indexes on vertices in this property graph.
|
java.util.Set<org.apache.hadoop.hbase.client.RegionInfo> |
getVertexTableRegionsInfo()
Gets the information of all HBase regions of the vertex table
|
int |
getVertexTableSplits()
Get the number of Vertex table splits based on the current number of splits per region associated to this graph and the number of regions in the vertex table.
|
org.apache.hadoop.hbase.TableName |
getVertexTabName()
Get the name of the Apache HBase table used to store vertices in this property graph.
|
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
getVerticesFromKey(org.apache.hadoop.hbase.client.Connection[] conns, boolean skipStoreToCache, int startSplitID, java.lang.String[] keys)
Gets an array of
Iterator objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback by executing a lookup over the secondary index table. |
static org.apache.hadoop.conf.Configuration |
prepareSecureConfig(org.apache.hadoop.conf.Configuration config, java.lang.String hbaseSecAuth, java.lang.String hadoopSecAuth, java.lang.String hmKerberosPrincipal, java.lang.String rsKerberosPrincipal, java.lang.String userPrincipal, java.lang.String keytab)
Prepares the Apache HBase configuration object to work with HBase secure cluster
|
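
A sketch of preparing a Kerberos-secured HBase configuration before opening the graph. All authentication mode strings, principals, realms and the keytab path are placeholder assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class SecureConfigSketch {
  static Configuration buildSecureConfig() {
    Configuration conf = HBaseConfiguration.create();
    // All values below are placeholders
    return OraclePropertyGraph.prepareSecureConfig(
        conf,
        "kerberos",                          // hbaseSecAuth
        "kerberos",                          // hadoopSecAuth
        "hbase/_HOST@EXAMPLE.COM",           // hmKerberosPrincipal
        "hbase/_HOST@EXAMPLE.COM",           // rsKerberosPrincipal
        "graphuser@EXAMPLE.COM",             // userPrincipal
        "/home/graphuser/graphuser.keytab"); // keytab
  }
}
```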
static void |
quietlyClose(org.apache.hadoop.hbase.client.Table htable)
Quietly close the specified Table object.
|
void |
rebuildEdgeSecondaryIndexTable()
Rebuilds the secondary index on edges.
|
void |
rebuildEdgeSecondaryIndexTable(int dop)
Rebuilds the secondary index on edges.
|
void |
rebuildVertexSecondaryIndexTable()
Rebuilds the secondary index on vertices.
|
void |
rebuildVertexSecondaryIndexTable(int dop)
Rebuilds the secondary index on vertices.
|
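
A brief sketch of rebuilding both secondary index tables, for example after a bulk load; the oracle.pg.hbase package name, the opg variable and the DOP value are assumptions.

```java
import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class RebuildIndexSketch {
  // Rebuild both secondary index tables using 8 parallel workers each
  static void rebuildSecondaryIndexes(OraclePropertyGraph opg) throws Exception {
    opg.rebuildVertexSecondaryIndexTable(8);
    opg.rebuildEdgeSecondaryIndexTable(8);
  }
}
```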
void |
removeEdge(org.apache.tinkerpop.gremlin.structure.Edge edge)
Remove the provided edge from the graph
|
void |
removeVertex(org.apache.tinkerpop.gremlin.structure.Vertex vertex)
Remove the provided vertex from the graph
|
static byte[] |
saltEdge(byte[] ba)
Salts an edge ID represented as a byte array
|
static byte[] |
saltVertex(byte[] ba)
Salts a vertex ID represented as a byte array
|
void |
setCompression(java.lang.String compress)
Set the type of compression used in the property graph tables.
|
void |
setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe)
Set the data block encoding used in the property graph tables.
|
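
A sketch of choosing table-level storage settings; the codec name, encoding and opg variable are illustrative. Per the notes in this page, these are metadata settings that take effect when the tables are truncated and recreated, which is why clearRepository appears here; it removes all data, so treat this strictly as a sketch.

```java
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class StorageSettingsSketch {
  static void tuneStorage(OraclePropertyGraph opg) throws Exception {
    opg.setCompression("SNAPPY");                          // codec name, see getCompressionAlgorithm
    opg.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);

    // WARNING: clearRepository removes all vertices and edges. It is shown
    // only because, per the notes above, the new settings apply once the
    // tables are truncated/recreated.
    opg.clearRepository();
  }
}
```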
void |
setEdgeBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the edge table. This is a metadata operation, so changes to this setting will show after clearing the repository and recreating the edge tables.
|
void |
setEdgeInitialNumRegions(int numRegions)
Set the initial number of regions defined for the Apache HBase edge tables associated to this property graph.
|
void |
setEdgeSndIndexBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the edge secondary table. This is a metadata operation, so changes to this setting will show after clearing the repository and recreating the edge tables.
|
void |
setInitialNumRegions(int numRegions)
Set the initial number of regions defined for the Apache HBase tables associated to this property graph.
|
void |
setNumSplitsPerRegion(int iNumSplitsPerRegion)
Set the number of splits per region to use when scanning vertices/edges
|
void |
setScanCachingSize(int iScanCachingSize)
Set the caching size used in Scan operations
|
void |
setVertexBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the vertex table
|
void |
setVertexInitialNumRegions(int numRegions)
Set the initial number of regions defined for the Apache HBase Vertex tables associated to this property graph.
|
void |
setVertexSndIndexBlockCacheSize(int blockCSize)
Set the size of the block cache (in bytes) associated to the vertex secondary table. This is a metadata operation, so changes to this setting will show after clearing the repository and recreating the vertex tables.
|
<T extends org.apache.tinkerpop.gremlin.structure.Element> |
updateGraph(OpCallback<T> oc, boolean skipStoreToCache, int dop, ProgressListener pl, OpCallback.UpdateType updateType)
Adds an attribute to all vertices based on the specified
VertexOpCallback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex> |
verticesByProperty(java.lang.String key, java.lang.Object value, java.lang.Class dtClass, boolean acceptWildcard, boolean preferredLuceneQuery)
Return an iterator to all matching vertices that have a particular key/value property.
|
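
A sketch of a wildcard vertex lookup; the property name "name", the pattern and the opg variable are illustrative. As noted in the method detail, wildcards require an automatic text index on the key.

```java
import java.util.Iterator;

import org.apache.tinkerpop.gremlin.structure.Vertex;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class VerticesByPropertySketch {
  static void findByNamePrefix(OraclePropertyGraph opg) {
    // Wildcard match on "name"; this assumes an automatic text index exists
    // on the key, so both acceptWildcard and preferredLuceneQuery are true
    Iterator<Vertex> it = opg.verticesByProperty("name", "Ali*", String.class,
                                                 true /* acceptWildcard */,
                                                 true /* preferredLuceneQuery */);
    while (it.hasNext()) {
      System.out.println(it.next());
    }
  }
}
```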
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex> |
verticesByPropertyKey(java.lang.String[] keys, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag)
Return an
Iterator to all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex> |
verticesByPropertyKey(java.lang.String[] keys, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag, int dop)
Return an
Iterator to all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID)
Gets an array of
Iterator objects that hold all the vertices in the graph. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, boolean bClonedGraph)
Gets an array of
Iterator objects that hold all the vertices in the graph. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterator objects that hold all the vertices in the graph. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String key)
Gets an array of
Iterator objects that hold all the vertices in the graph that have a particular key. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String[] keys)
Gets an array of
Iterator objects that hold all the vertices in the graph that have any of the specified keys. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String[] keys, boolean bCloneGraph, VertexFilterCallback vfc)
Gets an array of
Iterator objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String[] keys, boolean bCloneGraph, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterator objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, java.lang.String key, boolean bCloneGraph)
Gets an array of
Iterator objects that hold all the vertices in the graph that have a particular key. |
java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] |
verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections, boolean bSkipStoreToCache, int startSplitID, VertexFilterCallback vfc, OraclePropertyGraphBase.OptimizationFlag flag)
Gets an array of
Iterator objects that hold all the vertices in the graph that satisfy the specified vertex filter callback. |
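
The verticesPartitioned overloads follow the same pattern as edgesPartitioned. The sketch below sizes the connection array from getVertexTableSplits and counts all vertices; the opg variable and the counting logic are assumptions for illustration.

```java
import java.util.Iterator;

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.tinkerpop.gremlin.structure.Vertex;

import oracle.pg.hbase.OraclePropertyGraph; // package name assumed

public class VerticesPartitionedSketch {
  static long countViaPartitions(OraclePropertyGraph opg) throws Exception {
    // Size the connection array from the number of vertex table splits
    int splits = opg.getVertexTableSplits();
    Connection[] conns = new Connection[splits];
    for (int i = 0; i < splits; i++) {
      conns[i] = ConnectionFactory.createConnection(opg.getConfiguration());
    }
    long count = 0;
    Iterator<Vertex>[] parts = opg.verticesPartitioned(conns, true, 0);
    for (Iterator<Vertex> part : parts) {
      while (part.hasNext()) {
        part.next();
        count++;
      }
    }
    for (Connection c : conns) {
      c.close();
    }
    return count;
  }
}
```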
Methods inherited from class OraclePropertyGraphBase:
addUpdatedEdge, addUpdatedVertex, addVertex, addVertex, adjacentEdgesFromVertices, adjacentEdgesFromVertices, clearCache, clearUpdateQueue, commitIndices, compute, compute, configuration, createIndex, decodeObject, edges, edges, edgesByIds, edgesByIds, edgesByProperty, edgesByProperty, edgesByProperty, edgesByProperty, edgesByPropertyKey, edgesByPropertyKey, edgesByPropertyKey, getBatchSize, getCacheStatus, getConfig, getDatatypeClassFromValueObjForTextIndex, getDateTimeFormatter, getDefaultEdgeOptFlag, getDefaultIndexParameters, getDefaultVertexOptFlag, getEdgeFilterCallback, getElementsProperties, getGraphName, getJustEdgeIdOptFlag, getJustVertexIdOptFlag, getMaxLabelLengthAllowedInBytes, getMaxPropertyNameLengthAllowedInBytes, getMaxValueLengthAllowedInBytes, getProgressListener, getQueueSize, getShortestPath, getShortestPath, getSimpleDateFormat, getStringForObj, getTextIndexDirectoryPrefix, getVertexFilterCallback, io, isBuiltinElementsCacheDisabled, isDisposed, isEmpty, isShutdown, isSkipRefreshIndicesEnabled, isTransientPropertyGraphInstance, removeEdges, removeEdges, removeEdges, removeEdges, removeVertices, removeVertices, removeVertices, removeVertices, rollback, serializableToStr, setBatchSize, setCacheStatus, setDefaultEdgeOptFlag, setDefaultIndexParameters, setDefaultVertexOptFlag, setEdgeFilterCallback, setProgressListener, setQueueSize, setSkipRefreshIndices, setSolrJavaBinFormat, setTransientPropertyGraphInstance, setVertexFilterCallback, startTransaction, strToSerializable, sum, supportVertexLabels, toString, traversal, traversal, tx, useSolrJavaBinFormat, variables, vertices, vertices, verticesByIds, verticesByIds, verticesByProperty, verticesByProperty, verticesByProperty, verticesByProperty, verticesByPropertyKey, verticesByPropertyKey, verticesByPropertyKey

Methods inherited from class java.lang.Object:
equals, getClass, hashCode, notify, notifyAll, wait, wait, wait

Inherited index-management methods:
createKeyIndex, createKeyIndex, createKeyIndex, dropAllAutoIndices, dropAllIndices, dropAllManualIndices, dropIndex, dropKeyIndex, dropKeyIndex, dropKeyIndex, edgesUsingTextSearch, getAutoIndex, getIndex, getIndexedKeys, getIndexParameters, getIndices, isEdgeAutoIndexEnabled, isVertexAutoIndexEnabled, refreshAutoIndices, validateIndexManager, verticesUsingTextSearch

Inherited parallel-scan methods:
countEdges, countEdges, countVertices, countVertices, getEdgePropertyNames, getEdgePropertyNames, getElementsProperties, getInvalidEdgeIDSet, getInvalidEdgeIDSet, getMaxEdgeID, getMaxEdgeID, getMaxVertexID, getMaxVertexID, getMinEdgeID, getMinEdgeID, getMinVertexID, getMinVertexID, getVertexIDSet, getVertexIDSet, getVertexPropertyNames, getVertexPropertyNames

Inherited bulk attribute-update methods:
addAttributeToAllEdges, addAttributeToAllEdges, addAttributeToAllEdges, addAttributeToAllVertices, addAttributeToAllVertices, addAttributeToAllVertices, getDOPForMetadataOperations, removeAttributeFromAllEdges, removeAttributeFromAllEdges, removeAttributeFromAllEdges, removeAttributeFromAllVertices, removeAttributeFromAllVertices, removeAttributeFromAllVertices

public org.apache.tinkerpop.gremlin.structure.Edge addEdge(java.lang.Object id,
org.apache.tinkerpop.gremlin.structure.Vertex outVertex,
org.apache.tinkerpop.gremlin.structure.Vertex inVertex,
java.lang.String label)
addEdge in class OraclePropertyGraphBase
Parameters: id - the id of the edge; outVertex - has to be of type OracleVertex; inVertex - has to be of type OracleVertex; label - the edge label
Returns: an OracleEdge object.

public org.apache.tinkerpop.gremlin.structure.Vertex addVertexById(java.lang.Object id)
addVertexById in class OraclePropertyGraphBase
Parameters: id - the id of the vertex
Returns: an OracleVertex object.

public boolean areSecondaryIndicesForAdjacencyVerticesEnabled()

public boolean areSecondaryTablesForPropertiesEnabled()

public void clearRepository()
                     throws org.apache.hadoop.hbase.MasterNotRunningException,
                            java.io.IOException
clearRepository in class OraclePropertyGraphBase
Throws: org.apache.hadoop.hbase.MasterNotRunningException, java.io.IOException

public void clearRepository(boolean removeUsingIterators)
                     throws org.apache.hadoop.hbase.MasterNotRunningException,
                            java.io.IOException
Parameters: removeUsingIterators - specifies if the tables should be cleared up by iterating over all the elements instead of doing a table truncate.
Throws: org.apache.hadoop.hbase.MasterNotRunningException, java.io.IOException

public void close()

public void commit()
commit in class OraclePropertyGraphBase

public static byte[] concatenate(byte[] ba1,
                                 byte[] ba2)

public static byte[] concatenate(byte[] ba1,
                                 byte[] ba2,
                                 byte[] ba3)

public long countEdges(int dop,
                       ProgressListener pl)
Parameters: dop - a positive integer defining the number of threads to use when parallel scanning the edges. pl - a ProgressListener object.

public long countVertices(int dop,
                          ProgressListener pl)
Parameters: dop - a positive integer defining the number of threads to use when parallel scanning the vertices. pl - a ProgressListener object.

public void disableSecondaryIndicesForAdjacencyVertices()
                                                  throws java.io.IOException
Throws: java.io.IOException

public void disableSecondaryTablesForProperties()
                                          throws java.io.IOException
Throws: java.io.IOException

public void dispose()

public void dropIndexTable()
                    throws org.apache.hadoop.hbase.MasterNotRunningException,
                           java.io.IOException
Throws: org.apache.hadoop.hbase.MasterNotRunningException, java.io.IOException

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> edgesByProperty(java.lang.String key,
java.lang.Object value,
java.lang.Class dtClass,
boolean acceptWildcard,
boolean preferredLuceneQuery)
edgesByProperty in class OraclePropertyGraphBasekey - the name of the property. It MUST not be NULL.value - the value of the property. It can be NULL which will cause all vertices with the given key to be returned.dtClass - the data type of the value object.acceptWildcard - specifies if wild cards can be used in the value object. Wild cards can be used only when an automatic text index on the given key is present.preferredLuceneQuery - if true, a look up to the automatic index will be executed to find the matches.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> edgesByPropertyKey(boolean flushUpdates)
public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> edgesByPropertyKey(java.lang.String[] keys,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag)
Iterator to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. The information read back from the edges will be complete or partial based on the optimization flag specified.edgesByPropertyKey in class OraclePropertyGraphBasekeys - an array of property key names. It can be NULL which implies all vertices in the graph satisfying the edge filter callback will be returned.efc - a EdgeFilterCallback object specifying the conditions to keep a edge in the Iterator.flag - an OptimizationFlag object specifying if a partial or complete edge object will be returned. It can be NULL which implies the edges created must be complete.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> edgesByPropertyKey(java.lang.String[] keys,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag,
int dop)
Iterator to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. The information read back from the edges will be complete or partial based on the optimization flag specified.keys - an array of property key names. It can be NULL which implies all vertices in the graph satisfying the edge filter callback will be returned.efc - a EdgeFilterCallback object specifying the conditions to keep a edge in the Iterator.flag - an OptimizationFlag object specifying if a partial or complete edge object will be returned. It can be NULL which implies the edges created must be complete.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge> edgesByPropertyKey(java.lang.String[] keys,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag,
int dop,
boolean flushUpdates)
Iterator to all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. The information read back from the edges will be complete or partial based on the optimization flag specified.keys - an array of property key names. It can be NULL which implies all vertices in the graph satisfying the edge filter callback will be returned.efc - a EdgeFilterCallback object specifying the conditions to keep a edge in the Iterator.flag - an OptimizationFlag object specifying if a partial or complete edge object will be returned. It can be NULL which implies the edges created must be complete.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesFromPropertyKey(org.apache.hadoop.hbase.client.Connection[] conns,
boolean skipStoreToCache,
int startSplitID,
java.lang.String[] keys)
Iterator objects that hold all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback by executing a lookup over the secondary index table. The information read back from the edges will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].conns - an array of connections to Apache HBase.skipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.keys - an array of property key names. It can be NULL which implies all edges in the graph satisfying the edge filter callback will be returned.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID)
Iterator objects that hold all the edges in the graph. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
boolean bClonedGraph)
Iterator objects that hold all the edges in the graph. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.bClonedGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If there are writing operations done over the retrieved edges, the associated property graphs need to be commited for the changes to be persisted into Apache HBase.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
EdgeFilterCallback efc)
Iterator objects that hold all the edges in the graph that satisfy the specified edge filter callback. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.efc - a EdgeFilterCallback object specifying the conditions to keep an edge in the Iterator.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag)
Iterator objects that hold all the edges in the graph that satisfy the specified edge filter callback. The information read back from the edges will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.flag - an OptimizationFlag object specifying if a partial or complete edge object will be returned. It can be NULL which implies the edges created must be complete.startSplitID - the ID of the starting split.efc - a EdgeFilterCallback object specifying the conditions to keep an edge in the Iterator.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
OraclePropertyGraphBase.OptimizationFlag flag)
Iterator objects that hold all the edges in the graph. The information read back from the edges will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL which implies the edges created must be complete.startSplitID - the ID of the starting split.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String key)
Iterator objects that hold all the edges in the graph that have a particular key. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.key - name of the property. It can be NULL which implies all edges in the graph will be returned.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String[] keys)
Iterator objects that hold all the edges in the graph that have any of the specified keys. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.keys - an array of property key names. It can be NULL which implies all edges in the graph will be returned.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String[] keys,
boolean bClonedGraph,
EdgeFilterCallback efc)
Iterator objects that hold all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.efc - a EdgeFilterCallback object specifying the conditions to keep an edge in the Iterator.keys - an array of property key names. It can be NULL which implies all edges in the graph satisfying the edge filter callback will be returned.bClonedGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If there are writing operations done over the retrieved edges, the associated property graphs need to be commited for the changes to be persisted into Apache HBase.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String[] keys,
boolean bClonedGraph,
EdgeFilterCallback efc,
OraclePropertyGraphBase.OptimizationFlag flag)
Iterator objects that hold all the edges in the graph that have any of the specified keys and satisfy the specified edge filter callback. The information read back from the edges will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL which implies the edges created must be complete.startSplitID - the ID of the starting split.keys - an array of property key names. It can be NULL which implies all edges in the graph satisfying the edge filter callback will be returned.efc - a EdgeFilterCallback object specifying the conditions to keep an edge in the Iterator.bClonedGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If there are writing operations done over the retrieved edges, the associated property graphs need to be commited for the changes to be persisted into Apache HBase.Iterator array object.public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Edge>[] edgesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String key,
boolean bClonedGraph)
Iterator objects that hold all the edges in the graph that have a particular key. Each element in the Iterator array uses a separate connection provided to fetch a subset of the results from the corresponding split. The splits are determined by # of regions & num of splits per region for the table. Note that we assign an integer ID (in the range of [0, N - 1]) to all the splits in the vertex table with N splits. The subset of splits queried will consist of those splits with ID value in the range of [startSplitID, startSplit - 1 + size of connections array].connections - an array of connections to Apache HBase.bSkipStoreToCache - if true, the edges instances will not be stored into the cache.startSplitID - the ID of the starting split.key - name of the property. It can be NULL which implies all edges in the graph will be returned.bClonedGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If there are writing operations done over the retrieved edges, the associated property graphs need to be commited for the changes to be persisted into Apache HBase.Iterator array object.public void enableSecondaryIndicesForAdjacencyVertices()
throws java.io.IOException
java.io.IOExceptionpublic void enableSecondaryTablesForProperties()
throws java.io.IOException
java.io.IOExceptionpublic org.apache.tinkerpop.gremlin.structure.Graph.Features features()
public void flushHbaseCommits()
public void flushUpdates()
flushUpdates in class OraclePropertyGraphBasepublic java.lang.String getCompression()
public org.apache.hadoop.hbase.io.compress.Compression.Algorithm getCompressionAlgorithm(java.lang.String compress)
public org.apache.hadoop.conf.Configuration getConfiguration()
public org.apache.hadoop.hbase.client.Connection getConnection()
public org.apache.hadoop.hbase.io.encoding.DataBlockEncoding getDataBlockEncoding()
public OracleEdge getEdge(java.lang.Object id)
getEdge in class OraclePropertyGraphBaseid - the object identifier of the referenced edgeOracleEdge objectpublic int getEdgeBlockCacheSize()
public int getEdgeInitialNumRegions()
public OracleEdgeBase getEdgeInstance(java.lang.Long eid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
getEdgeInstance in class OraclePropertyGraphBaseeid - the id of the edgebCreateIfAbsentFromCache - if false, returns a NULL value if there is no match in the cache.bSkipStoreToCache - if true, the vertex instance will not be stored into the cache.public OracleEdgeBase getEdgeInstance(OracleVertexBase outVertex, OracleVertexBase inVertex, java.lang.String edgeLabel, java.lang.Long eid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
getEdgeInstance in class OraclePropertyGraphBaseeid - the id of the edgeoutVertex - the outgoing Vertex of the edgeinVertex - the incoming Vertex of the edgeedgeLabel - the label for the Edge.bCreateIfAbsentFromCache - if false, returns a NULL value if there is no match in the cache.bSkipStoreToCache - if true, the vertex instance will not be stored into the cache.public int getEdgePropertyNames(int dop,
int timeout,
java.util.Map<java.lang.String,java.lang.Class> propertyNames,
ProgressListener pl)
dop - a positive integer defining the number of threads to use when parallel scanning the edgestimeout - time out in the unit of secondspropertyNames - a map of <property name, property class> to be returnedpl - a ProgressListener object.public int getEdgePropertyNames(int dop,
int timeout,
java.util.Set<java.lang.String> propertyNames,
ProgressListener pl)
dop - a positive integer defining the number of threads to use when parallel scanning the edgestimeout - time out in the unit of secondspropertyNames - a set of property names to be returnedpl - a ProgressListener object.public int getEdgeSndIndexBlockCacheSize()
public org.apache.hadoop.hbase.TableName getEdgeSndIndexTabName()
public java.util.Set<org.apache.hadoop.hbase.client.RegionInfo> getEdgeTableRegionsInfo()
throws java.io.IOException
java.io.IOExceptionpublic int getEdgeTableSplits()
throws java.io.IOException
java.io.IOExceptionpublic org.apache.hadoop.hbase.TableName getEdgeTabName()
public static org.apache.hadoop.conf.Configuration getHbaseConfiguration(PgHbaseGraphConfig config)
config - the PGX PgHbaseGraphConfig objectpublic static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
conf - the Configuration object with all the Apache HBase connection information.hconn - an Connection object handling a connection to HBase.szGraphName - the name of the graphorg.apache.hadoop.hbase.MasterNotRunningExceptionorg.apache.hadoop.hbase.ZooKeeperConnectionExceptionjava.io.IOExceptionpublic static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int initialNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
conf - the Configuration object with all the Apache HBase connection information.hconn - an Connection object handling a connection to HBase.szGraphName - the name of the graphinitialNumRegions - the initial number of regions used for vertex/edge tables. Both vertex and edge tables will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.org.apache.hadoop.hbase.MasterNotRunningExceptionorg.apache.hadoop.hbase.ZooKeeperConnectionExceptionjava.io.IOExceptionpublic static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int initialNumRegions, int splitsPerRegion) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
conf - the Configuration object with all the Apache HBase connection information.hconn - an Connection object handling a connection to HBase.szGraphName - the name of the graphinitialNumRegions - the initial number of regions used for vertex/edge tables. Both vertex and edge tables will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.org.apache.hadoop.hbase.MasterNotRunningExceptionorg.apache.hadoop.hbase.ZooKeeperConnectionExceptionjava.io.IOExceptionpublic static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int initialNumRegions, int splitsPerRegion, int blockCacheSize) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
initialNumRegions - the initial number of regions used for the vertex/edge tables. Both vertex and edge tables will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
blockCacheSize - the size of the block cache (in bytes) used for the vertex/edge tables; this is also the block size used when writing out store files (HFiles) for these tables.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe, int splitsPerRegion, int blockCacheSize, int initialVertexNumRegions, int initialEdgeNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
compress - the compression used in the property graph tables. The default value is "SNAPPY".
dbe - the data block encoding used in the property graph tables
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
blockCacheSize - the size of the block cache (in bytes) used for the vertex/edge tables; this is also the block size used when writing out store files (HFiles) for these tables.
initialVertexNumRegions - the initial number of regions used for the vertex table. The table will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
initialEdgeNumRegions - the initial number of regions used for the edge table. The table will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int initialNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
compress - the compression used in the property graph tables. The default value is "SNAPPY".
initialNumRegions - the initial number of regions used for the vertex/edge tables. Both vertex and edge tables will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int initialNumRegions, int splitsPerRegion) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
compress - the compression used in the property graph tables. The default value is "SNAPPY".
initialNumRegions - the initial number of regions used for the vertex/edge tables. Both vertex and edge tables will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int initialNumRegions, int splitsPerRegion, int blockCacheSize) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
compress - the compression used in the property graph tables. The default value is "SNAPPY".
initialNumRegions - the initial number of regions used for the vertex/edge tables. Both vertex and edge tables will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
blockCacheSize - the size of the block cache (in bytes) used for the vertex/edge tables; this is also the block size used when writing out store files (HFiles) for these tables.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, java.lang.String compress, int splitsPerRegion, int blockCacheSize, int initialVertexNumRegions, int initialEdgeNumRegions) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
compress - the compression used in the property graph tables. The default value is "SNAPPY".
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
blockCacheSize - the size of the block cache (in bytes) used for the vertex/edge tables; this is also the block size used when writing out store files (HFiles) for these tables.
initialVertexNumRegions - the initial number of regions used for the vertex table. The table will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
initialEdgeNumRegions - the initial number of regions used for the edge table. The table will be created using the specified number of regions. If the graph already exists, a clearRepository operation must be executed in order to truncate the tables and set the new number of regions.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(PgHbaseGraphConfig config) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
config - a PgHbaseGraphConfig object.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public static OraclePropertyGraph getInstance(PgHbaseGraphConfig config, long maxRowSize) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException
Parameters:
config - a PgHbaseGraphConfig object.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException
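A small sketch of the config-based overloads, combined with getHbaseConfiguration. How the PgHbaseGraphConfig is built is not covered in this section, so it is taken as a parameter here; the oracle.pg.hbase and oracle.pgx.config package names are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import oracle.pg.hbase.OraclePropertyGraph;
    import oracle.pgx.config.PgHbaseGraphConfig;

    public class GetInstanceFromConfig {
      // Opens a graph from a PgHbaseGraphConfig built elsewhere
      // (for example, loaded from a JSON graph-config file).
      static OraclePropertyGraph open(PgHbaseGraphConfig config) throws IOException {
        // getHbaseConfiguration(config) is handy when a raw HBase Configuration
        // is also needed for auxiliary HBase client calls.
        Configuration conf = OraclePropertyGraph.getHbaseConfiguration(config);
        System.out.println("Derived HBase configuration: " + conf);
        return OraclePropertyGraph.getInstance(config);
      }
    }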
public static OraclePropertyGraph getInstanceWithSplits(org.apache.hadoop.conf.Configuration conf, org.apache.hadoop.hbase.client.Connection hconn, java.lang.String szGraphName, int splitsPerRegion) throws org.apache.hadoop.hbase.MasterNotRunningException, org.apache.hadoop.hbase.ZooKeeperConnectionException, java.io.IOException

Parameters:
conf - the Configuration object with all the Apache HBase connection information.
hconn - a Connection object handling a connection to HBase.
szGraphName - the name of the graph
splitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges.
Throws:
org.apache.hadoop.hbase.MasterNotRunningException
org.apache.hadoop.hbase.ZooKeeperConnectionException
java.io.IOException

public java.util.Set<java.lang.Long> getInvalidEdgeIDSet(int dop,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the edge and vertex tables in parallel.
pl - a ProgressListener object.

public long getMaxEdgeID(int dop,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the edges in parallel.
pl - a ProgressListener object.

public long getMaxVertexID(int dop,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel.
pl - a ProgressListener object.

public long getMinEdgeID(int dop,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the edges in parallel.
pl - a ProgressListener object.

public long getMinVertexID(int dop,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel.
pl - a ProgressListener object.
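A quick sketch that pulls the vertex and edge ID ranges using the min/max methods above. The oracle.pg.hbase package is an assumption, as is passing null for the ProgressListener (supply one if progress callbacks are wanted).

    import oracle.pg.hbase.OraclePropertyGraph;

    public class IdRangeExample {
      // Reports the ID ranges currently in use, scanning with 8 threads.
      static void printIdRanges(OraclePropertyGraph opg) {
        int dop = 8;
        // null ProgressListener is an assumption; pass one to monitor the scans.
        long minV = opg.getMinVertexID(dop, null);
        long maxV = opg.getMaxVertexID(dop, null);
        long minE = opg.getMinEdgeID(dop, null);
        long maxE = opg.getMaxEdgeID(dop, null);
        System.out.println("Vertex IDs: [" + minV + ", " + maxV + "]");
        System.out.println("Edge IDs:   [" + minE + ", " + maxE + "]");
      }
    }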
public int getNumSplitsPerRegion()

public long getOperationQueueSize()
Overrides:
getOperationQueueSize in class OraclePropertyGraphBase

public OracleIndexManager getOracleIndexManager()
Specified by:
getOracleIndexManager in interface OracleIndexableGraph
Overrides:
getOracleIndexManager in class OraclePropertyGraphBase
Returns:
an OracleIndexManager object.

public static int getSaltSize()
public int getScanCachingSize()
public int getSecondaryIndexNumBuckets()
public org.apache.hadoop.hbase.TableName getTextIdxMetadataTabName()
public OracleVertex getVertex(java.lang.Object id)
Overrides:
getVertex in class OraclePropertyGraphBase
Parameters:
id - the object identifier of the referenced vertex
Returns:
a Vertex object

public int getVertexBlockCacheSize()
public java.util.Set<java.lang.Long> getVertexIDSet(int dop,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertex table in parallel.
pl - a ProgressListener object.

public int getVertexInitialNumRegions()
public OracleVertexBase getVertexInstance(java.lang.Long vid, boolean bCreateIfAbsentFromCache, boolean bSkipStoreToCache)
Overrides:
getVertexInstance in class OraclePropertyGraphBase
Parameters:
vid - the ID of the vertex
bCreateIfAbsentFromCache - if false, returns a NULL value if there is no match in the cache.
bSkipStoreToCache - if true, the vertex instance will not be stored into the cache.

public int getVertexPropertyNames(int dop,
int timeout,
java.util.Map<java.lang.String,java.lang.Class> propertyNames,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel
timeout - timeout in seconds
propertyNames - a map of <property name, property class> entries to be returned
pl - a ProgressListener object.

public int getVertexPropertyNames(int dop,
int timeout,
java.util.Set<java.lang.String> propertyNames,
ProgressListener pl)
Parameters:
dop - a positive integer defining the number of threads to use when scanning the vertices in parallel
timeout - timeout in seconds
propertyNames - a set of property names to be returned
pl - a ProgressListener object.

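A small sketch of the Set-based overload above. It assumes the method fills the supplied set with the discovered vertex property names, that the oracle.pg.hbase package is correct, and that a null ProgressListener is acceptable; the dop and timeout values are placeholders.

    import java.util.HashSet;
    import java.util.Set;
    import oracle.pg.hbase.OraclePropertyGraph;

    public class PropertyNamesExample {
      // Collects the distinct vertex property names by scanning the vertex table
      // with 4 threads and a 60-second timeout.
      static Set<String> vertexPropertyNames(OraclePropertyGraph opg) {
        Set<String> names = new HashSet<>();
        // null ProgressListener: assumed optional; pass one to monitor the scan.
        opg.getVertexPropertyNames(4 /* dop */, 60 /* timeout, seconds */, names, null);
        return names;
      }
    }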
public int getVertexSndIndexBlockCacheSize()

public org.apache.hadoop.hbase.TableName getVertexSndIndexTabName()
public java.util.Set<org.apache.hadoop.hbase.client.RegionInfo> getVertexTableRegionsInfo()
throws java.io.IOException
Throws:
java.io.IOException

public int getVertexTableSplits()
throws java.io.IOException
Throws:
java.io.IOException

public org.apache.hadoop.hbase.TableName getVertexTabName()
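The vertex-table accessors above (getVertexTabName, getVertexTableSplits, getVertexTableRegionsInfo) can be combined to inspect how the graph data is laid out in HBase. A minimal sketch, assuming the oracle.pg.hbase package:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import oracle.pg.hbase.OraclePropertyGraph;

    public class TableLayoutExample {
      // Prints where the vertex data lives and how it is split for parallel scans.
      static void describeVertexTable(OraclePropertyGraph opg) throws IOException {
        System.out.println("Vertex table name : " + opg.getVertexTabName());
        System.out.println("Splits            : " + opg.getVertexTableSplits());
        for (RegionInfo ri : opg.getVertexTableRegionsInfo()) {
          System.out.println("Region            : " + ri.getRegionNameAsString());
        }
      }
    }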
public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] getVerticesFromKey(org.apache.hadoop.hbase.client.Connection[] conns,
boolean skipStoreToCache,
int startSplitID,
java.lang.String[] keys)
Returns an array of Iterator objects holding all the vertices in the graph that have any of the specified keys, obtained by executing a lookup over the secondary index table. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
conns - an array of connections to Apache HBase.
skipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies all vertices in the graph will be returned.
Returns:
an Iterator array object.

public static org.apache.hadoop.conf.Configuration prepareSecureConfig(org.apache.hadoop.conf.Configuration config,
java.lang.String hbaseSecAuth,
java.lang.String hadoopSecAuth,
java.lang.String hmKerberosPrincipal,
java.lang.String rsKerberosPrincipal,
java.lang.String userPrincipal,
java.lang.String keytab)
Parameters:
config - the Apache HBase configuration object
hbaseSecAuth - the HBase authentication mode (kerberos/simple)
hadoopSecAuth - the Hadoop authentication mode (kerberos/simple)
hmKerberosPrincipal - the Kerberos principal name that should be used to run the HMaster process. The principal name should be in the form user/hostname@DOMAIN. If "_HOST" is used as the hostname portion, it will be replaced with the actual hostname of the running instance.
rsKerberosPrincipal - the Kerberos principal name that should be used to run the HRegionServer process, for example "hbase/_HOST@EXAMPLE.COM". The principal name should be in the form user/hostname@DOMAIN. If "_HOST" is used as the hostname portion, it will be replaced with the actual hostname of the running instance.
userPrincipal - the user principal to log in as
keytab - the keytab file containing the password for the login
Returns:
a Configuration object.
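A sketch of building a Kerberos-enabled configuration with prepareSecureConfig. Every principal, realm, and path below is a placeholder for illustration only, and the oracle.pg.hbase package is an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import oracle.pg.hbase.OraclePropertyGraph;

    public class SecureConfigExample {
      // Decorates a base HBase configuration with Kerberos settings.
      static Configuration kerberosConfig() {
        Configuration base = HBaseConfiguration.create();
        return OraclePropertyGraph.prepareSecureConfig(
            base,
            "kerberos",                           // hbaseSecAuth
            "kerberos",                           // hadoopSecAuth
            "hbase/_HOST@EXAMPLE.COM",            // hmKerberosPrincipal
            "hbase/_HOST@EXAMPLE.COM",            // rsKerberosPrincipal
            "graphuser@EXAMPLE.COM",              // userPrincipal (placeholder)
            "/home/graphuser/graphuser.keytab");  // keytab path (placeholder)
      }
    }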
public static void quietlyClose(org.apache.hadoop.hbase.client.Table htable)

Parameters:
htable - the Table object to close.

public void rebuildEdgeSecondaryIndexTable()
public void rebuildEdgeSecondaryIndexTable(int dop)
public void rebuildVertexSecondaryIndexTable()
public void rebuildVertexSecondaryIndexTable(int dop)
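The rebuild methods above regenerate the vertex and edge secondary index tables; the int overloads run the rebuild with the given degree of parallelism. A minimal sketch, assuming the oracle.pg.hbase package:

    import oracle.pg.hbase.OraclePropertyGraph;

    public class RebuildIndexExample {
      // Rebuilds both secondary index tables using 8 threads.
      static void rebuildSecondaryIndexes(OraclePropertyGraph opg) {
        int dop = 8; // degree of parallelism, tune to the cluster size
        opg.rebuildVertexSecondaryIndexTable(dop);
        opg.rebuildEdgeSecondaryIndexTable(dop);
      }
    }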
public void removeEdge(org.apache.tinkerpop.gremlin.structure.Edge edge)
Overrides:
removeEdge in class OraclePropertyGraphBase
Parameters:
edge - an Edge object to be removed

public void removeVertex(org.apache.tinkerpop.gremlin.structure.Vertex vertex)
Overrides:
removeVertex in class OraclePropertyGraphBase
Parameters:
vertex - a Vertex object to be removed

public static byte[] saltEdge(byte[] ba)
Parameters:
ba - the edge ID represented as a byte array

public static byte[] saltVertex(byte[] ba)
Parameters:
ba - the vertex ID represented as a byte array

public void setCompression(java.lang.String compress)
Parameters:
compress - the compression used in the property graph tables

public void setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding dbe)
Parameters:
dbe - the data block encoding used in the property graph tables

public void setEdgeBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the edge table. The default value is 131072 bytes.

public void setEdgeInitialNumRegions(int numRegions)
Parameters:
numRegions - a positive integer with the initial number of regions for the edge table. The default value is 4.

public void setEdgeSndIndexBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the edge secondary index table. The default value is 131072 bytes.

public void setInitialNumRegions(int numRegions)
Parameters:
numRegions - the initial number of regions.

public void setNumSplitsPerRegion(int iNumSplitsPerRegion)
Parameters:
iNumSplitsPerRegion - a positive integer specifying the number of splits per region to use when scanning vertices/edges. The default value is 1.

public void setScanCachingSize(int iScanCachingSize)
Parameters:
iScanCachingSize - the cache size to use.

public void setVertexBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the vertex table. This is a metadata operation, so changes to this setting take effect after clearing the repository and recreating the vertex tables. The default value is 131072 bytes.

public void setVertexInitialNumRegions(int numRegions)
Parameters:
numRegions - a positive integer with the initial number of regions for the vertex table. The default value is 4.

public void setVertexSndIndexBlockCacheSize(int blockCSize)
Parameters:
blockCSize - a positive integer representing the size of the block cache (in bytes) used in the vertex secondary index table. The default value is 131072 bytes.
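The setters above tune how the property graph tables are created and scanned. A configuration sketch, assuming the oracle.pg.hbase package; the concrete values are placeholders, and, as the descriptions note, region-count and block-cache settings are metadata that take effect once the repository is cleared and the tables are recreated.

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import oracle.pg.hbase.OraclePropertyGraph;

    public class TableTuningExample {
      static void tune(OraclePropertyGraph opg) {
        opg.setCompression("SNAPPY");                    // the default compression
        opg.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
        opg.setVertexInitialNumRegions(8);
        opg.setEdgeInitialNumRegions(8);
        opg.setNumSplitsPerRegion(2);                    // splits used when scanning
        opg.setScanCachingSize(1000);                    // scanner caching size
        opg.setVertexBlockCacheSize(131072);             // bytes
        opg.setEdgeBlockCacheSize(131072);               // bytes
      }
    }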
public <T extends org.apache.tinkerpop.gremlin.structure.Element> void updateGraph(OpCallback<T> oc, boolean skipStoreToCache, int dop, ProgressListener pl, OpCallback.UpdateType updateType)

Parameters:
oc - an OpCallback object.
skipStoreToCache - if true, the vertex instances will not be stored into the cache.
dop - a positive integer defining the number of threads to use when processing the vertices in parallel.
pl - a ProgressListener object.
See Also:
OracleGraphAttributeUpdater, VertexOpCallback

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex> verticesByProperty(java.lang.String key,
java.lang.Object value,
java.lang.Class dtClass,
boolean acceptWildcard,
boolean preferredLuceneQuery)
Description copied from class: OraclePropertyGraphBase
Overrides:
verticesByProperty in class OraclePropertyGraphBase
Parameters:
key - the name of the property. It must not be NULL.
value - the value of the property. It can be NULL, which will cause all vertices with the given key to be returned.
dtClass - the class representing the datatype of the property value. This can be used to cast the property value to a different datatype. For example, if the value is "3.01" and dtClass is Float.class, then this value will be treated as 3.01f. If dtClass is set to NULL, then the query will be executed by trying to match the value object against all the supported datatypes.
acceptWildcard - specifies if wildcards can be used in the value object. Wildcards can be used only when an automatic text index on the given key is present.
preferredLuceneQuery - specifies if the value object uses Apache Lucene syntax (further details on Apache Lucene syntax can be found at https://lucene.apache.org/core/2_9_4/queryparsersyntax.html). If the automatic index is not enabled and this flag is set to true, an Exception will be thrown because Lucene syntax is not supported for database lookups.
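A small sketch of a simple key/value lookup with verticesByProperty. The "name"/"Alice" pair is a placeholder and the oracle.pg.hbase package is an assumption; no wildcards or Lucene syntax are used, so no automatic text index is required.

    import java.util.Iterator;
    import org.apache.tinkerpop.gremlin.structure.Vertex;
    import oracle.pg.hbase.OraclePropertyGraph;

    public class LookupByPropertyExample {
      // Finds vertices whose "name" property equals "Alice".
      static void findByName(OraclePropertyGraph opg) {
        Iterator<Vertex> it = opg.verticesByProperty(
            "name",        // property key, must not be NULL
            "Alice",       // property value
            String.class,  // treat the value as a String
            false,         // acceptWildcard
            false);        // preferredLuceneQuery
        while (it.hasNext()) {
          System.out.println(it.next().id());
        }
      }
    }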
public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex> verticesByPropertyKey(java.lang.String[] keys,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag)
Returns an Iterator over all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified.
Overrides:
verticesByPropertyKey in class OraclePropertyGraphBase
Parameters:
keys - an array of property key names. It can be NULL, which implies all vertices in the graph satisfying the vertex filter callback will be returned.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterator.
flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL, which implies the vertices created must be complete.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex> verticesByPropertyKey(java.lang.String[] keys,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag,
int dop)
Returns an Iterator over all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified.
Parameters:
keys - an array of property key names. It can be NULL, which implies all vertices in the graph satisfying the vertex filter callback will be returned.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterator.
flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL, which implies the vertices created must be complete.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID)
Returns an array of Iterator objects that hold all the vertices in the graph. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
boolean bClonedGraph)
Returns an array of Iterator objects that hold all the vertices in the graph. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
bClonedGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs need to be committed for the changes to be persisted into Apache HBase.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
OraclePropertyGraphBase.OptimizationFlag flag)
Returns an array of Iterator objects that hold all the vertices in the graph. The information read back from the vertices will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL, which implies the vertices created must be complete.
startSplitID - the ID of the starting split.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String key)
Returns an array of Iterator objects that hold all the vertices in the graph that have a particular key. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
key - the name of the property. It can be NULL, which implies all vertices in the graph will be returned.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String[] keys)
Returns an array of Iterator objects that hold all the vertices in the graph that have any of the specified keys. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies all vertices in the graph will be returned.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String[] keys,
boolean bCloneGraph,
VertexFilterCallback vfc)
Returns an array of Iterator objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterator.
keys - an array of property key names. It can be NULL, which implies all vertices in the graph satisfying the vertex filter callback will be returned.
bCloneGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs need to be committed for the changes to be persisted into Apache HBase.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String[] keys,
boolean bCloneGraph,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag)
Returns an array of Iterator objects that hold all the vertices in the graph that have any of the specified keys and satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL, which implies the vertices created must be complete.
startSplitID - the ID of the starting split.
keys - an array of property key names. It can be NULL, which implies all vertices in the graph satisfying the vertex filter callback will be returned.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterator.
bCloneGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs need to be committed for the changes to be persisted into Apache HBase.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
java.lang.String key,
boolean bCloneGraph)
Returns an array of Iterator objects that hold all the vertices in the graph that have a particular key. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
startSplitID - the ID of the starting split.
key - the name of the property. It can be NULL, which implies all vertices in the graph will be returned.
bCloneGraph - if true, each Iterator in the array will use a new OraclePropertyGraph instance behind the scenes. If write operations are performed on the retrieved vertices, the associated property graphs need to be committed for the changes to be persisted into Apache HBase.
Returns:
an Iterator array object.

public java.util.Iterator<org.apache.tinkerpop.gremlin.structure.Vertex>[] verticesPartitioned(org.apache.hadoop.hbase.client.Connection[] connections,
boolean bSkipStoreToCache,
int startSplitID,
VertexFilterCallback vfc,
OraclePropertyGraphBase.OptimizationFlag flag)
Returns an array of Iterator objects that hold all the vertices in the graph that satisfy the specified vertex filter callback. The information read back from the vertices will be complete or partial based on the optimization flag specified. Each element in the Iterator array uses one of the supplied connections to fetch a subset of the results from the corresponding split. The splits are determined by the number of regions and the number of splits per region for the table. Note that an integer ID in the range [0, N - 1] is assigned to the N splits of the vertex table. The subset of splits queried consists of those splits with an ID in the range [startSplitID, startSplitID + (size of the connections array) - 1].
Parameters:
connections - an array of connections to Apache HBase.
bSkipStoreToCache - if true, the vertex instances will not be stored into the cache.
flag - an OptimizationFlag object specifying if a partial or complete vertex object will be returned. It can be NULL, which implies the vertices created must be complete.
startSplitID - the ID of the starting split.
vfc - a VertexFilterCallback object specifying the conditions to keep a vertex in the Iterator.
Returns:
an Iterator array object.
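A sketch of the partitioned-scan pattern described above, using the three-argument verticesPartitioned overload. The oracle.pg.hbase package is an assumption, and the two-connection setup is only illustrative; with startSplitID = 0 and a two-element connection array, splits 0 and 1 are covered.

    import java.util.Iterator;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.tinkerpop.gremlin.structure.Vertex;
    import oracle.pg.hbase.OraclePropertyGraph;

    public class PartitionedScanExample {
      // Counts the vertices in the first two splits of the vertex table,
      // using one HBase connection per split and skipping the cache.
      static long countFirstTwoSplits(OraclePropertyGraph opg) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Connection[] conns = {
            ConnectionFactory.createConnection(conf),
            ConnectionFactory.createConnection(conf)
        };
        long count = 0;
        try {
          Iterator<Vertex>[] parts = opg.verticesPartitioned(conns, true, 0);
          for (Iterator<Vertex> part : parts) {
            while (part.hasNext()) {
              part.next();
              count++;
            }
          }
        } finally {
          for (Connection c : conns) {
            c.close();
          }
        }
        return count;
      }
    }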