Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
124 changes: 0 additions & 124 deletions .github/outdated/.travis.yml

This file was deleted.

2 changes: 1 addition & 1 deletion .github/outdated/release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ on:

jobs:
build:
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
env:
TAG_NAME: ${{ github.ref_name }}
steps:
Expand Down
11 changes: 6 additions & 5 deletions .github/workflows/server-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ on:
jobs:
build-server:
# TODO: we need to test it & replace it with ubuntu-24.04 or ubuntu-latest
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
env:
USE_STAGE: 'false' # Whether to include the stage repository.
TRAVIS_DIR: hugegraph-server/hugegraph-dist/src/assembly/travis
Expand All @@ -27,7 +27,7 @@ jobs:
strategy:
fail-fast: false
matrix:
BACKEND: [ memory, rocksdb, hbase, cassandra, mysql, postgresql, scylladb ]
BACKEND: [ memory, rocksdb, hbase ]
JAVA_VERSION: [ '11' ]

steps:
Expand All @@ -38,7 +38,7 @@ jobs:

# TODO: Remove this step after install-backend.sh updated
- name: Install Java8 for backend
uses: actions/setup-java@v3
uses: actions/setup-java@v4
with:
java-version: '8'
distribution: 'zulu'
Expand All @@ -48,13 +48,13 @@ jobs:
$TRAVIS_DIR/install-backend.sh $BACKEND && jps -l

- name: Install Java ${{ matrix.JAVA_VERSION }}
uses: actions/setup-java@v3
uses: actions/setup-java@v4
with:
java-version: ${{ matrix.JAVA_VERSION }}
distribution: 'zulu'

- name: Cache Maven packages
uses: actions/cache@v3
uses: actions/cache@v4
with:
path: ~/.m2
key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
Expand Down Expand Up @@ -95,6 +95,7 @@ jobs:
$TRAVIS_DIR/run-tinkerpop-test.sh $BACKEND tinkerpop

- name: Upload coverage to Codecov
# TODO: update to v5 later
uses: codecov/codecov-action@v3
with:
token: ${{ secrets.CODECOV_TOKEN }}
Expand Down
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,9 @@ achieved through the powerful [Gremlin](https://tinkerpop.apache.org/gremlin.htm
- Compliant to [Apache TinkerPop 3](https://tinkerpop.apache.org/), supports [Gremlin](https://tinkerpop.apache.org/gremlin.html) & [Cypher](https://en.wikipedia.org/wiki/Cypher) language
- Schema Metadata Management, including VertexLabel, EdgeLabel, PropertyKey and IndexLabel
- Multi-type Indexes, supporting exact query, range query and complex conditions combination query
- Plug-in Backend Store Driver Framework, support `RocksDB`/`HStore`, `HBase`, `Cassandra/ScyllaDB`, and `MySQL/Postgre` now and easy to add another backend store driver if needed
- Plug-in Backend Store Framework, mainly supports `RocksDB`/`HStore` + `HBase` for now; you can choose other backends in the [legacy version](https://hugegraph.apache.org/docs/download/download/) ≤ `1.5.0` (like `MySQL/PG`/`Cassandra` ...)
- Integration with `Flink/Spark/HDFS`, and friendly to connect other big data platforms
- Complete graph ecosystem (including both in/out-memory `Graph Computing` + `Graph Visualization & Tools` + `Graph Learning & AI`, see [here](#3-build-from-source))


## Quick Start
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ edge.cache_type=l2
backend=hstore
serializer=binary

# graph name
store=hugegraph

# pd config
Expand All @@ -50,48 +51,16 @@ task.schedule_period=10
task.retry=0
task.wait_timeout=10

# raft config
raft.mode=false
raft.path=./raft-log
raft.safe_read=true
raft.use_replicator_pipeline=true
raft.election_timeout=10000
raft.snapshot_interval=3600
raft.backend_threads=48
raft.read_index_threads=8
raft.snapshot_threads=4
raft.snapshot_parallel_compress=false
raft.snapshot_compress_threads=4
raft.snapshot_decompress_threads=4
raft.read_strategy=ReadOnlyLeaseBased
raft.queue_size=16384
raft.queue_publish_timeout=60
raft.apply_batch=1
raft.rpc_threads=80
raft.rpc_connect_timeout=5000
raft.rpc_timeout=60
raft.install_snapshot_rpc_timeout=36000

# search config
search.text_analyzer=jieba
search.text_analyzer_mode=INDEX

# rocksdb backend config
### RocksDB backend config
#rocksdb.data_path=/path/to/disk
#rocksdb.wal_path=/path/to/disk


# cassandra backend config
cassandra.host=localhost
cassandra.port=9042
cassandra.username=
cassandra.password=
#cassandra.connect_timeout=5
#cassandra.read_timeout=20
#cassandra.keyspace.strategy=SimpleStrategy
#cassandra.keyspace.replication=3

# hbase backend config
### HBase backend config
#hbase.hosts=localhost
#hbase.port=2181
#hbase.znode_parent=/hbase
Expand All @@ -102,25 +71,3 @@ cassandra.password=
#hbase.enable_partition=true
#hbase.vertex_partitions=10
#hbase.edge_partitions=30

# mysql backend config
#jdbc.driver=com.mysql.jdbc.Driver
#jdbc.url=jdbc:mysql://127.0.0.1:3306
#jdbc.username=root
#jdbc.password=
#jdbc.reconnect_max_times=3
#jdbc.reconnect_interval=3
#jdbc.ssl_mode=false

# postgresql & cockroachdb backend config
#jdbc.driver=org.postgresql.Driver
#jdbc.url=jdbc:postgresql://localhost:5432/
#jdbc.username=postgres
#jdbc.password=
#jdbc.postgresql.connect_database=template1

# palo backend config
#palo.host=127.0.0.1
#palo.poll_interval=10
#palo.temp_dir=./palo-data
#palo.file_limit_size=32
2 changes: 1 addition & 1 deletion hugegraph-server/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,4 @@ HugeGraph Server consists of two layers of functionality: the graph engine layer
- Backend Interface: Implements the storage of graph data to the backend.

- Storage Layer:
- Storage Backend: Supports multiple built-in storage backends (RocksDB/MySQL/HBase/...) and allows users to extend custom backends without modifying the existing source code.
- Storage Backend: Supports multiple built-in storage backends (RocksDB/Memory/HStore/HBase/...) and allows users to extend custom backends without modifying the existing source code.
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

package org.apache.hugegraph.backend.store;

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

Expand All @@ -26,14 +27,24 @@
import org.apache.hugegraph.backend.store.raft.RaftBackendStoreProvider;
import org.apache.hugegraph.config.CoreOptions;
import org.apache.hugegraph.config.HugeConfig;
import org.apache.hugegraph.util.E;
import org.apache.hugegraph.util.Log;
import org.slf4j.Logger;

/**
* BREAKING CHANGE:
* Since 1.7.0, only the "hstore, rocksdb, hbase, memory" backends are supported.
* If you want to use Cassandra, MySQL, PostgreSQL, CockroachDB or Palo as the backend,
* please use a release of Apache HugeGraph earlier than 1.7.0 for your application.
*/
public class BackendProviderFactory {

private static final Logger LOG = Log.logger(BackendProviderFactory.class);

private static Map<String, Class<? extends BackendStoreProvider>> providers;
private static final Map<String, Class<? extends BackendStoreProvider>> providers;

private static final List<String> ALLOWED_BACKENDS = List.of("memory", "rocksdb", "hbase",
"hstore");

static {
providers = new ConcurrentHashMap<>();
Expand All @@ -47,8 +58,7 @@ public static BackendStoreProvider open(HugeGraphParams params) {

BackendStoreProvider provider = newProvider(config);
if (raftMode) {
LOG.info("Opening backend store '{}' in raft mode for graph '{}'",
backend, graph);
LOG.info("Opening backend store '{}' in raft mode for graph '{}'", backend, graph);
provider = new RaftBackendStoreProvider(params, provider);
}
provider.open(graph);
Expand All @@ -57,8 +67,10 @@ public static BackendStoreProvider open(HugeGraphParams params) {

private static BackendStoreProvider newProvider(HugeConfig config) {
String backend = config.get(CoreOptions.BACKEND).toLowerCase();
String graph = config.get(CoreOptions.STORE);
E.checkState(ALLOWED_BACKENDS.contains(backend.toLowerCase()),
"backend is illegal: %s", backend);

String graph = config.get(CoreOptions.STORE);
if (InMemoryDBStoreProvider.matchType(backend)) {
return InMemoryDBStoreProvider.instance(graph);
}
Expand All @@ -68,24 +80,23 @@ private static BackendStoreProvider newProvider(HugeConfig config) {
"Not exists BackendStoreProvider: %s", backend);

assert BackendStoreProvider.class.isAssignableFrom(clazz);
BackendStoreProvider instance = null;
BackendStoreProvider instance;
try {
instance = clazz.newInstance();
instance = clazz.getDeclaredConstructor().newInstance();
} catch (Exception e) {
throw new BackendException(e);
}

BackendException.check(backend.equals(instance.type()),
"BackendStoreProvider with type '%s' " +
"can't be opened by key '%s'",
instance.type(), backend);
"can't be opened by key '%s'", instance.type(), backend);
return instance;
}

@SuppressWarnings({"rawtypes", "unchecked"})
public static void register(String name, String classPath) {
ClassLoader classLoader = BackendProviderFactory.class.getClassLoader();
Class<?> clazz = null;
Class<?> clazz;
try {
clazz = classLoader.loadClass(classPath);
} catch (Exception e) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,6 @@ import org.apache.hugegraph.dist.RegisterUtil
// register all the backends to avoid changes if we need to support other backends
RegisterUtil.registerPlugins()
RegisterUtil.registerRocksDB()
RegisterUtil.registerCassandra()
RegisterUtil.registerScyllaDB()
RegisterUtil.registerHBase()
RegisterUtil.registerMysql()
RegisterUtil.registerPalo()
RegisterUtil.registerPostgresql()

graph = HugeFactory.open('./conf/graphs/hugegraph.properties')
Loading
Loading