getPropertyIds() {
+ return propertyIds;
+ }
+
+ public boolean isEmptyId() {
+ return emptyId;
+ }
+
+ public boolean needSerialize() {
+ return emptyId || (propertyIds != null && propertyIds.size() > 0);
+ }
+
+ @Override
+ public String toString() {
+ return "PropertyList{" +
+ "propertyIds=" + propertyIds +
+ ", isEmpty=" + emptyId +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/QueryTypeParam.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/QueryTypeParam.java
new file mode 100644
index 0000000000..229657381f
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/QueryTypeParam.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query;
+
+import java.util.Arrays;
+
+import lombok.Data;
+
+/**
+ * primary index scan:
+ * range scan: start + end
+ * id scan: start + isPrefix (false)
+ * prefix scan: start + isPrefix (true)
+ *
+ * secondary index scan:
+ * default range: start + end + isSecondaryIndex (true)
+ */
+@Data
+public class QueryTypeParam {
+
+ // NOTE(review): EMPTY is shared and mutable via the Lombok-generated setters;
+ // treat it as read-only — confirm no caller mutates it.
+ public static final QueryTypeParam EMPTY = new QueryTypeParam();
+ /**
+ * id scan, the hash code of the key.
+ * this code would be calculated by KeyUtil.getOwnerKey
+ * default : -1, scan all partitions. if set, would affect scan partitions of prefix scan and
+ * range scan.
+ * NOTE(review): package-private while all sibling fields are private — confirm intended.
+ */
+ int code = -1;
+ /**
+ * range scan - prefix start, prefix scan, id scan
+ * class: org.apache.hugegraph.id.Id
+ */
+ private byte[] start;
+ /**
+ * range scan - prefix end, prefix scan (null)
+ * class: org.apache.hugegraph.id.Id
+ */
+ private byte[] end;
+ /**
+ * the boundary of range/prefix scan (gt/lt/eq/gte/lte)
+ */
+ private int boundary = 0;
+ /**
+ * whether the start key is an id or a prefix
+ */
+ private boolean isPrefix = false;
+ /**
+ * whether to look up the index table (g+index)
+ */
+ private boolean isSecondaryIndex = false;
+ /**
+ * todo: used to check the prefix of id.asBytes() when deserializing an ID from the index
+ */
+ private byte[] idPrefix;
+
+ private QueryTypeParam() {
+
+ }
+
+ public QueryTypeParam(byte[] start, byte[] end, int boundary, boolean isPrefix,
+ boolean isSecondaryIndex, int code) {
+ this.start = start;
+ this.end = end;
+ this.boundary = boundary;
+ this.isPrefix = isPrefix;
+ this.isSecondaryIndex = isSecondaryIndex;
+ this.code = code;
+ }
+
+ public QueryTypeParam(byte[] start, byte[] end, int boundary, boolean isPrefix,
+ boolean isSecondaryIndex,
+ int code, byte[] idPrefix) {
+ this.start = start;
+ this.end = end;
+ this.boundary = boundary;
+ this.isPrefix = isPrefix;
+ this.isSecondaryIndex = isSecondaryIndex;
+ this.code = code;
+ this.idPrefix = idPrefix;
+ }
+
+ /**
+ * primary : id scan (code -1, i.e. scans all partitions).
+ *
+ * @deprecated use {@link #ofIdScanParam(byte[], int)} so the owner code can
+ * restrict which partitions are scanned.
+ */
+ @Deprecated
+ public static QueryTypeParam ofIdScanParam(byte[] start) {
+ assert (start != null);
+ return new QueryTypeParam(start, null, 0, false, false, -1);
+ }
+
+ /**
+ * primary : id scan
+ *
+ * @param start id key
+ * @param code owner code
+ * @return param
+ */
+ public static QueryTypeParam ofIdScanParam(byte[] start, int code) {
+ assert (start != null);
+ return new QueryTypeParam(start, null, 0, false, false, code);
+ }
+
+ /**
+ * primary : prefix scan
+ *
+ * @param start prefix
+ * @param boundary boundary
+ * @return param
+ */
+ public static QueryTypeParam ofPrefixScanParam(byte[] start, int boundary) {
+ assert (start != null);
+ return new QueryTypeParam(start, null, boundary, true, false, -1);
+ }
+
+ /**
+ * primary : prefix scan
+ *
+ * @param start prefix
+ * @param boundary boundary
+ * @param code used for specify partition
+ * @return param
+ */
+ public static QueryTypeParam ofPrefixScanParam(byte[] start, int boundary, int code) {
+ assert (start != null);
+ return new QueryTypeParam(start, null, boundary, true, false, code);
+ }
+
+ /**
+ * primary : range scan
+ *
+ * @param start start key
+ * @param end end key
+ * @param boundary boundary
+ * @return param
+ */
+ public static QueryTypeParam ofRangeScanParam(byte[] start, byte[] end, int boundary) {
+ assert (start != null && end != null);
+ return new QueryTypeParam(start, end, boundary, false, false, -1);
+ }
+
+ /**
+ * primary : range scan
+ *
+ * @param start start key
+ * @param end end key
+ * @param boundary boundary
+ * @param code use for specify partition
+ * @return param
+ */
+ public static QueryTypeParam ofRangeScanParam(byte[] start, byte[] end, int boundary,
+ int code) {
+ assert (start != null && end != null);
+ return new QueryTypeParam(start, end, boundary, false, false, code);
+ }
+
+ /**
+ * index scan: range scan
+ *
+ * @param start range start
+ * @param end range end
+ * @param boundary boundary
+ * @return param
+ */
+ public static QueryTypeParam ofIndexScanParam(byte[] start, byte[] end, int boundary) {
+ return new QueryTypeParam(start, end, boundary, false, true, -1);
+ }
+
+ /**
+ * index scan: range scan with id prefix check
+ *
+ * @param start range start
+ * @param end range end
+ * @param boundary boundary
+ * @param idPrefix id prefix
+ * @return param
+ */
+ public static QueryTypeParam ofIndexScanParam(byte[] start, byte[] end, int boundary,
+ byte[] idPrefix) {
+ return new QueryTypeParam(start, end, boundary, false, true, -1, idPrefix);
+ }
+
+ /**
+ * index scan : prefix
+ *
+ * @param start prefix
+ * @param boundary boundary
+ * @return param
+ */
+ public static QueryTypeParam ofIndexScanParam(byte[] start, int boundary) {
+ return new QueryTypeParam(start, null, boundary, true, true, -1);
+ }
+
+ /**
+ * index scan : prefix with id prefix check
+ *
+ * @param start prefix
+ * @param boundary boundary
+ * @param idPrefix idPrefix
+ * @return param
+ */
+ public static QueryTypeParam ofIndexScanParam(byte[] start, int boundary, byte[] idPrefix) {
+ return new QueryTypeParam(start, null, boundary, true, true, -1, idPrefix);
+ }
+
+ // NOTE(review): explicit accessors below are redundant with the Lombok
+ // @Data-generated ones; kept as written.
+ public byte[] getIdPrefix() {
+ return idPrefix;
+ }
+
+ public void setIdPrefix(byte[] idPrefix) {
+ this.idPrefix = idPrefix;
+ }
+
+ // Scan-type predicates: classify this param by which fields are populated.
+ public boolean isIdScan() {
+ return !isPrefix && start != null && start.length > 0 && (end == null || end.length == 0) &&
+ !isSecondaryIndex;
+ }
+
+ public boolean isRangeScan() {
+ return !isPrefix && start != null && start.length > 0 && end != null && end.length > 0 &&
+ !isSecondaryIndex;
+ }
+
+ public boolean isPrefixScan() {
+ return isPrefix && start != null && start.length > 0 && (end == null || end.length == 0) &&
+ !isSecondaryIndex;
+ }
+
+ public boolean isIndexScan() {
+ return isRangeIndexScan() || isPrefixIndexScan();
+ }
+
+ public boolean isRangeIndexScan() {
+ return isSecondaryIndex && !isPrefix && start != null && start.length > 0 && end != null &&
+ end.length > 0;
+ }
+
+ public boolean isPrefixIndexScan() {
+ return isSecondaryIndex && isPrefix && start != null && start.length > 0;
+ }
+
+ @Override
+ public String toString() {
+ return "QueryTypeParam{" +
+ (isSecondaryIndex ? "[S - " : "[P - ") +
+ (end != null ? "Range]" : (isPrefix ? "Prefix]" : "ID]")) +
+ " start=" + Arrays.toString(start) +
+ (end != null ? ", end=" + Arrays.toString(end) : "") +
+ ", boundary=" + boundary +
+ (isIdScan() ? ", code=" + code : "") +
+ (idPrefix != null ? ", idPrefix=" + Arrays.toString(idPrefix) : "") +
+ '}';
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/StoreQueryParam.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/StoreQueryParam.java
new file mode 100644
index 0000000000..c34a3f26a8
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/StoreQueryParam.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query;
+
+import java.util.HashSet;
+import java.util.List;
+
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.query.ConditionQuery;
+import org.apache.hugegraph.store.query.func.AggregationFunctionParam;
+
+import lombok.Data;
+
+@Data
+public class StoreQueryParam {
+
+ /**
+ * For non-aggregation queries:
+ * if null, or its size is 0, no property filtering is applied
+ */
+ private final PropertyList properties = PropertyList.of();
+ private final boolean groupBySchemaLabel = false;
+ private final SORT_ORDER sortOrder = SORT_ORDER.ASC;
+ /**
+ * Whether keys need de-duplication; used for multiple query params or index queries
+ */
+ private final DEDUP_OPTION dedupOption = DEDUP_OPTION.NONE;
+ /**
+ * Limit on the number of result rows
+ */
+ private final Integer limit = 0;
+ /**
+ * Offset is currently managed by the server; in theory it should always be 0
+ */
+ private final Integer offset = 0;
+ /**
+ * Sampling ratio
+ */
+ private final double sampleFactor = 1.0;
+ /**
+ * Build the base element from the index id (in the NO_SCAN case)
+ */
+ private final boolean loadPropertyFromIndex = false;
+ /**
+ * Whether to parse TTL
+ */
+ private final boolean checkTTL = false;
+ // NOTE(review): the final fields above have fixed initializers and Lombok
+ // generates no setters for finals, so they can never differ from these
+ // defaults — confirm whether 'final' is intended here.
+ /**
+ * Generated by the client; used to distinguish different queries
+ */
+ private String queryId;
+ /**
+ * the graph
+ */
+ private String graph;
+ /**
+ * the table name
+ */
+ private String table;
+ /**
+ * List of aggregation functions.
+ * NOTE(review): several List fields below appear as raw types — the generic
+ * parameters were likely lost in extraction (this one is presumably
+ * List of AggregationFunctionParam, given the getFunctionType() calls in
+ * checkQuery); confirm against the upstream file.
+ */
+ private List funcList;
+ /**
+ * Group-by list; these ids are also the properties
+ */
+ private List groupBy;
+ /**
+ * Order-by fields.
+ * Lower priority than properties.
+ * Agg: an id not present in groupBy is invalid
+ * Non-agg: an id not present in properties is invalid
+ */
+ private List orderBy;
+ /**
+ * Filter conditions
+ */
+ private ConditionQuery conditionQuery;
+ /**
+ * Not implemented yet
+ */
+ private List having;
+ private StoreQueryType queryType;
+ private List queryParam;
+ /**
+ * Used in non-order-by, non-aggregation queries
+ */
+ private byte[] position;
+ /**
+ * Attach the corresponding properties from the OLAP table onto the HgElement (Vertex)
+ */
+ private List olapProperties;
+ /**
+ * Indexes: elements of an inner list are AND-ed, the outer lists are OR-ed.
+ * IndexRange is a range query.
+ * If scanType is INDEX_SCAN, the base table must be looked up afterwards.
+ */
+ private List> indexes;
+
+ // Throws IllegalArgumentException when the expression is true.
+ private static void isFalse(boolean expression, String message) {
+
+ if (message == null) {
+ throw new IllegalArgumentException("message is null");
+ }
+
+ if (expression) {
+ throw new IllegalArgumentException(message);
+ }
+ }
+
+ private static boolean isEmpty(List list) {
+ return list == null || list.size() == 0;
+ }
+
+ /**
+ * Validates this query's invariants, throwing IllegalArgumentException on the
+ * first violation.
+ */
+ public void checkQuery() {
+ isFalse(queryId == null, "query id is null");
+ isFalse(graph == null, "graph is null");
+ isFalse(table == null, "table is null");
+
+ isFalse(queryType == null, "queryType is null");
+
+ isFalse(queryType == StoreQueryType.PRIMARY_SCAN && isEmpty(queryParam),
+ "query param is null when PRIMARY_SCAN");
+ // no scan & index scan should have indexes
+ isFalse(queryType == StoreQueryType.NO_SCAN && isEmpty(indexes),
+ "ScanType.NO_SCAN without indexes");
+ isFalse(queryType == StoreQueryType.NO_SCAN &&
+ (indexes.size() != 1 || indexes.get(0).size() != 1),
+ "ScanType.NO_SCAN only support one index");
+ isFalse(loadPropertyFromIndex &&
+ (isEmpty(indexes) || indexes.size() != 1 || indexes.get(0).size() != 1),
+ " loadPropertyFromIndex only support one(must be one) index in no scan");
+
+ isFalse(queryType == StoreQueryType.INDEX_SCAN && isEmpty(indexes),
+ "ScanType.INDEX_SCAN without indexes ");
+
+ isFalse(!isEmpty(groupBy) && !isEmpty(properties.getPropertyIds()) &&
+ !new HashSet<>(groupBy).containsAll(properties.getPropertyIds()),
+ "properties should be subset of groupBy");
+
+ isFalse(!isEmpty(groupBy) && !isEmpty(orderBy) &&
+ !new HashSet<>(groupBy).containsAll(orderBy),
+ "order by should be subset of groupBy");
+
+ // isFalse(properties.isEmptyId() && ! queryParam.stream().allMatch(p -> p.isIdScan()),
+ // "empty property only apply id scan");
+
+ // todo: just group by, no aggregations ??
+ if (funcList != null) {
+ for (var func : funcList) {
+ // SUM/MAX/MIN/AVG require a target field; COUNT does not.
+ if (func.getFunctionType() == AggregationFunctionParam.AggregationFunctionType.SUM
+ ||
+ func.getFunctionType() == AggregationFunctionParam.AggregationFunctionType.MAX
+ ||
+ func.getFunctionType() == AggregationFunctionParam.AggregationFunctionType.MIN
+ || func.getFunctionType() ==
+ AggregationFunctionParam.AggregationFunctionType.AVG) {
+ isFalse(func.getField() == null,
+ func.getFunctionType().name() + " has no filed value");
+ }
+
+ if (func.getFunctionType() ==
+ AggregationFunctionParam.AggregationFunctionType.SUM) {
+ // ||func.getFunctionType() == AggregationFunctionParam
+ // .AggregationFunctionType.AVG){
+ isFalse(func.getFiledType() == AggregationFunctionParam.FiledType.STRING,
+ func.getFunctionType().name() + " can not apply a String type");
+ }
+ }
+ }
+
+ // NOTE(review): the message says "greater than 0" but the check allows 0 —
+ // confirm the intended wording ("greater than or equal to 0"?).
+ isFalse(limit < 0, "limit should be greater than 0");
+ isFalse(sampleFactor < 0 || sampleFactor > 1, "sample factor out of range [0-1]");
+ }
+
+ public enum DEDUP_OPTION {
+ NONE,
+ /**
+ * Approximate de-dup, using a bitmap
+ */
+ DEDUP,
+ /**
+ * Exact de-dup guaranteed for the first N rows; approximate afterwards
+ */
+ LIMIT_DEDUP,
+ /**
+ * Exact de-dup, guaranteeing accuracy
+ */
+ PRECISE_DEDUP
+ }
+
+ public enum SORT_ORDER {
+ ASC,
+ DESC,
+ /**
+ * Only for queries that are entirely ID lookups: keep the original input id order
+ */
+ STRICT_ORDER
+ }
+
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/StoreQueryType.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/StoreQueryType.java
new file mode 100644
index 0000000000..d4e46e65bd
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/StoreQueryType.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query;
+
+public enum StoreQueryType {
+ /**
+ * full table scan
+ */
+ TABLE_SCAN,
+
+ /**
+ * primary-key scan: includes id, prefix and range scans
+ */
+ PRIMARY_SCAN,
+
+ /**
+ * index scan that needs to look up the base table (g+v, g+e) afterwards.
+ */
+ INDEX_SCAN,
+
+ /**
+ * index scan, without looking up the base table afterwards
+ */
+ NO_SCAN
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/Tuple2.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/Tuple2.java
new file mode 100644
index 0000000000..ae50cbdb2a
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/Tuple2.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query;
+
+import java.io.Serializable;
+
+import lombok.Data;
+
+@Data
+public class Tuple2 implements Serializable {
+
+ private final X v1;
+ private final Y v2;
+
+ public Tuple2(X v1, Y v2) {
+ this.v1 = v1;
+ this.v2 = v2;
+ }
+
+ public static Tuple2 of(X v1, Y v2) {
+ return new Tuple2<>(v1, v2);
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/concurrent/AtomicFloat.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/concurrent/AtomicFloat.java
new file mode 100644
index 0000000000..fa404b00be
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/concurrent/AtomicFloat.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.concurrent;
+
+import java.io.Serializable;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+
/**
 * A {@code float} value that may be updated atomically. The value is stored as
 * its IEEE-754 bit pattern in a volatile {@code int} field and updated through
 * CAS loops on an {@link AtomicIntegerFieldUpdater} (the same technique Guava's
 * {@code AtomicDouble} uses with long bits).
 *
 * <p>Fixes: restores the generic parameters stripped from
 * {@code Comparable<AtomicFloat>} and the field updater; the constructor now
 * uses {@link Float#floatToIntBits(float)} like {@link #set(float)} (it used the
 * raw variant, which could store a non-canonical NaN pattern inconsistent with
 * the bit-comparing CAS loops); adds a serialVersionUID.
 *
 * <p>Like the JDK atomic classes, equals/hashCode are not overridden; instances
 * compare by identity.
 */
public class AtomicFloat extends Number implements Serializable, Comparable<AtomicFloat> {

    private static final long serialVersionUID = 1L;

    /** Updater performing CAS on the {@link #intBits} field. */
    private static final AtomicIntegerFieldUpdater<AtomicFloat> FIELD_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(AtomicFloat.class, "intBits");

    /** IEEE-754 bit pattern of the current value. */
    private volatile int intBits;

    /** Creates a new AtomicFloat with initial value {@code 0.0f}. */
    public AtomicFloat() {
        this(0.0f);
    }

    /**
     * Creates a new AtomicFloat with the given initial value.
     *
     * @param value the initial value
     */
    public AtomicFloat(float value) {
        // Canonicalizing floatToIntBits keeps stored patterns consistent with
        // set() and the CAS loops, which compare bit patterns for equality.
        this.intBits = Float.floatToIntBits(value);
    }

    /**
     * Returns the current value.
     *
     * @return the current value
     */
    public float get() {
        return Float.intBitsToFloat(intBits);
    }

    /**
     * Sets to the given value.
     *
     * @param newValue the new value
     */
    public final void set(float newValue) {
        this.intBits = Float.floatToIntBits(newValue);
    }

    /**
     * Atomically sets to the given value and returns the old value.
     *
     * @param newValue the new value
     * @return the previous value
     */
    public final float getAndSet(float newValue) {
        return getAndSetFloat(newValue);
    }

    /**
     * Atomically adds the given value to the current value.
     *
     * @param delta the value to add
     * @return the previous value
     */
    public final float getAndAdd(float delta) {
        return getAndAddFloat(delta);
    }

    /**
     * Atomically adds the given value to the current value.
     *
     * @param delta the value to add
     * @return the updated value (previous value plus delta)
     */
    public final float addAndGet(float delta) {
        return getAndAddFloat(delta) + delta;
    }

    /**
     * CAS loop: add {@code delta} to the current value.
     *
     * @param delta the increment
     * @return the value before the addition
     */
    private float getAndAddFloat(float delta) {
        int oldBits;
        int newBits;
        do {
            oldBits = intBits;
            newBits = Float.floatToIntBits(Float.intBitsToFloat(oldBits) + delta);
        } while (!FIELD_UPDATER.compareAndSet(this, oldBits, newBits));
        return Float.intBitsToFloat(oldBits);
    }

    /**
     * CAS loop: replace the current value with {@code newValue}.
     *
     * @param newValue the new value
     * @return the value before the replacement
     */
    private float getAndSetFloat(float newValue) {
        int oldBits;
        int newBits;
        do {
            oldBits = intBits;
            newBits = Float.floatToIntBits(newValue);
        } while (!FIELD_UPDATER.compareAndSet(this, oldBits, newBits));
        return Float.intBitsToFloat(oldBits);
    }

    /** {@inheritDoc} Narrowing conversion of the current value to {@code int}. */
    @Override
    public int intValue() {
        return (int) get();
    }

    /** {@inheritDoc} Narrowing conversion of the current value to {@code long}. */
    @Override
    public long longValue() {
        return (long) get();
    }

    /** {@inheritDoc} */
    @Override
    public float floatValue() {
        return get();
    }

    /** {@inheritDoc} Widening conversion of the current value to {@code double}. */
    @Override
    public double doubleValue() {
        return get();
    }

    /**
     * Compares the current values of the two atomics numerically.
     *
     * @param o the other AtomicFloat
     * @return negative, zero, or positive as this value is less than, equal to,
     *         or greater than {@code o}'s value
     */
    @Override
    public int compareTo(AtomicFloat o) {
        return Float.compare(get(), o.get());
    }

    @Override
    public String toString() {
        return "AtomicFloat{" +
                "intBits=" + intBits +
                ", value = " + get() +
                '}';
    }
}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AbstractAggregationFunction.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AbstractAggregationFunction.java
new file mode 100644
index 0000000000..7a4dcf8692
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AbstractAggregationFunction.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.func;
+
+public abstract class AbstractAggregationFunction implements AggregationFunction {
+
+ protected volatile U buffer;
+
+ @Override
+ public U getBuffer() {
+ return buffer;
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunction.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunction.java
new file mode 100644
index 0000000000..d99763baae
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunction.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.func;
+
/**
 * agg function: contract for an incremental aggregation (create a buffer,
 * feed records into it, merge partial buffers, then reduce to a result).
 *
 * <p>Fix: restores the generic type parameters stripped from the declaration
 * (the original javadoc already documented buffer/record/return type params)
 * and fixes the "finial" typo.
 *
 * @param <U> buffer type
 * @param <R> record type
 * @param <T> return type
 */
public interface AggregationFunction<U, R, T> {

    /**
     * Optional hook invoked before aggregation starts; default is a no-op.
     */
    default void init() {
    }

    /**
     * initial value of the merge function
     *
     * @return initial value
     */
    U createBuffer();

    /**
     * get the buffer that was created by createBuffer()
     *
     * @return the current aggregation buffer
     */
    U getBuffer();

    /**
     * the operation applied when iterating over each record
     *
     * @param record record
     */
    void iterate(R record);

    /**
     * merge another partial buffer into this buffer
     *
     * @param other other buffer
     */
    void merge(U other);

    /**
     * final aggregation
     *
     * @return the reduced result of the buffer
     */
    T reduce();

}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunctionParam.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunctionParam.java
new file mode 100644
index 0000000000..dbf32c5fb9
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunctionParam.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.func;
+
+import org.apache.hugegraph.id.Id;
+
+import lombok.Data;
+
+@Data
+public class AggregationFunctionParam {
+
+ private AggregationFunctionType functionType;
+ /**
+ * the type of aggregation filed.
+ * eg: sum(age): the type is integer
+ */
+ private FiledType filedType;
+ /**
+ * field id
+ */
+ private Id field;
+
+ private AggregationFunctionParam(AggregationFunctionType functionType, FiledType filedType,
+ Id filed) {
+ this.functionType = functionType;
+ this.filedType = filedType;
+ this.field = filed;
+ }
+
+ public static AggregationFunctionParam ofCount() {
+ return new AggregationFunctionParam(AggregationFunctionType.COUNT, FiledType.LONG, null);
+ }
+
+ public static AggregationFunctionParam ofSum(FiledType filedType, Id filed) {
+ return new AggregationFunctionParam(AggregationFunctionType.SUM, filedType, filed);
+ }
+
+ public static AggregationFunctionParam ofMin(FiledType filedType, Id filed) {
+ return new AggregationFunctionParam(AggregationFunctionType.MIN, filedType, filed);
+ }
+
+ public static AggregationFunctionParam ofMax(FiledType filedType, Id filed) {
+ return new AggregationFunctionParam(AggregationFunctionType.MAX, filedType, filed);
+ }
+
+ public static AggregationFunctionParam ofAvg(FiledType filedType, Id filed) {
+ return new AggregationFunctionParam(AggregationFunctionType.AVG, filedType, filed);
+ }
+
+ public enum AggregationFunctionType {
+ COUNT,
+ SUM,
+ MIN,
+ MAX,
+ AVG
+ }
+
+ public enum FiledType {
+ LONG("java.lang.Long"),
+ INTEGER("java.lang.Integer"),
+ FLOAT("java.lang.Float"),
+ DOUBLE("java.lang.Double"),
+ STRING("java.lang.String");
+
+ private final String genericType;
+
+ FiledType(String genericType) {
+ this.genericType = genericType;
+ }
+
+ public String getGenericType() {
+ return genericType;
+ }
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunctions.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunctions.java
new file mode 100644
index 0000000000..8c946192f5
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/AggregationFunctions.java
@@ -0,0 +1,532 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.func;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.store.query.Tuple2;
+import org.apache.hugegraph.store.query.concurrent.AtomicFloat;
+
+import com.google.common.util.concurrent.AtomicDouble;
+
+public class AggregationFunctions {
+
+    /**
+     * Returns a supplier producing the initial (zero/identity) buffer value for the
+     * given boxed-type name used by the aggregation functions.
+     *
+     * NOTE(review): Float previously fell through to the Double case, yielding a
+     * Double-typed initial value for float fields. UnaryAggregationFunction.getInitValue()
+     * dispatches on the runtime class of the supplier's value, so float fields would
+     * have been given an AtomicDouble buffer and later "(Float) record" / "(Double) record"
+     * casts would conflict. Float now gets its own 0.0f initial value — confirm callers
+     * did not rely on the old Double fall-through.
+     */
+    public static Supplier getAggregationBufferSupplier(String genericType) {
+        switch (genericType) {
+            case "java.lang.Long":
+                return () -> 0L;
+            case "java.lang.Integer":
+                return () -> 0;
+            case "java.lang.Float":
+                return () -> 0.0f;
+            case "java.lang.Double":
+                return () -> 0.0;
+            case "java.lang.String":
+                return () -> "";
+            default:
+                throw new RuntimeException("unsupported generic type of buffer: " + genericType);
+        }
+    }
+
+ public static class SumFunction extends UnaryAggregationFunction {
+
+ public SumFunction(Id field, Supplier supplier) {
+ super(field, supplier);
+ }
+
+ public SumFunction(Supplier supplier) {
+ super();
+ this.supplier = supplier;
+ this.buffer = initBuffer();
+ }
+
+ /**
+ * Accumulates a single record into the running sum; null records are ignored.
+ * Dispatches on the runtime class of the buffer chosen by initBuffer().
+ *
+ * @param record the record to add to the sum
+ */
+ @Override
+ public void iterate(T record) {
+ if (record != null) {
+ switch (buffer.getClass().getName()) {
+ case "java.util.concurrent.atomic.AtomicLong":
+ ((AtomicLong) buffer).getAndAdd((long) record);
+ break;
+ case "java.util.concurrent.atomic.AtomicInteger":
+ ((AtomicInteger) buffer).getAndAdd((Integer) record);
+ break;
+ case "com.google.common.util.concurrent.AtomicDouble":
+ ((AtomicDouble) buffer).getAndAdd((Double) record);
+ break;
+ case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+ ((AtomicFloat) buffer).getAndAdd((Float) record);
+ break;
+ default:
+ // unknown buffer type: silently ignored — NOTE(review): consider throwing IllegalStateException
+ break;
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * Merges another partial sum buffer of the same atomic type into this one
+ * by adding its current value.
+ */
+ @Override
+ public void merge(U other) {
+ switch (buffer.getClass().getName()) {
+ case "java.util.concurrent.atomic.AtomicLong":
+ ((AtomicLong) buffer).getAndAdd(((AtomicLong) other).get());
+ break;
+ case "java.util.concurrent.atomic.AtomicInteger":
+ ((AtomicInteger) buffer).getAndAdd(((AtomicInteger) other).get());
+ break;
+ case "com.google.common.util.concurrent.AtomicDouble":
+ ((AtomicDouble) buffer).getAndAdd(((AtomicDouble) other).get());
+ break;
+ case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+ ((AtomicFloat) buffer).getAndAdd(((AtomicFloat) other).get());
+ break;
+ default:
+ // unknown buffer type: silently ignored — NOTE(review): consider throwing IllegalStateException
+ break;
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public T reduce() {
+ switch (buffer.getClass().getName()) {
+ case "java.util.concurrent.atomic.AtomicLong":
+ return (T) Long.valueOf(((AtomicLong) buffer).get());
+ case "java.util.concurrent.atomic.AtomicInteger":
+ return (T) Integer.valueOf(((AtomicInteger) buffer).get());
+ case "com.google.common.util.concurrent.AtomicDouble":
+ return (T) Double.valueOf(((AtomicDouble) buffer).get());
+ case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+ return (T) Float.valueOf(((AtomicFloat) buffer).get());
+ default:
+ // throw new Exception ?
+ break;
+ }
+ return null;
+ }
+
+ /**
+ * {@inheritDoc}
+ * Initializes the sum buffer with a zero-valued atomic holder matching the
+ * field type carried by the supplier.
+ *
+ * @return the zero-initialized atomic buffer
+ */
+ @Override
+ protected U initBuffer() {
+ return getInitValue(() -> new AtomicLong(0),
+ () -> new AtomicInteger(0),
+ () -> new AtomicDouble(0.0),
+ () -> new AtomicFloat(0.0f));
+ }
+ }
+
+ public static class MaxFunction extends UnaryAggregationFunction {
+
+ public MaxFunction(Id field, Supplier supplier) {
+ super(field, supplier);
+ }
+
+ public MaxFunction(Supplier supplier) {
+ super();
+ this.supplier = supplier;
+ this.buffer = initBuffer();
+ }
+
+        /**
+         * Seeds the max-aggregation buffer with the lowest possible value of each
+         * numeric type so that any real record replaces it.
+         *
+         * BUG FIX: Double.MIN_VALUE and Float.MIN_VALUE are the smallest POSITIVE
+         * values, not the most negative ones, so MAX over all-negative doubles or
+         * floats would wrongly return ~4.9e-324 / ~1.4e-45. Use the negated
+         * MAX_VALUE as the true lower bound.
+         */
+        @Override
+        protected U initBuffer() {
+            return getInitValue(() -> new AtomicLong(Long.MIN_VALUE),
+                                () -> new AtomicInteger(Integer.MIN_VALUE),
+                                () -> new AtomicDouble(-Double.MAX_VALUE),
+                                () -> new AtomicFloat(-Float.MAX_VALUE));
+        }
+
+ @Override
+ public void iterate(T record) {
+ if (record != null) {
+ // string case
+ if (this.buffer == null && record != null) {
+ this.buffer = (U) record;
+ return;
+ }
+
+ switch (buffer.getClass().getName()) {
+ case "java.util.concurrent.atomic.AtomicLong":
+ if (((AtomicLong) buffer).get() < (long) record) {
+ ((AtomicLong) buffer).set((long) record);
+ }
+ break;
+ case "java.util.concurrent.atomic.AtomicInteger":
+ if (((AtomicInteger) buffer).get() < (int) record) {
+ ((AtomicInteger) buffer).set((int) record);
+ }
+ break;
+ case "com.google.common.util.concurrent.AtomicDouble":
+ if (((AtomicDouble) buffer).get() < (double) record) {
+ ((AtomicDouble) buffer).set((double) record);
+ }
+ break;
+ case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+ if (((AtomicFloat) buffer).get() < (float) record) {
+ ((AtomicFloat) buffer).set((float) record);
+ }
+ break;
+
+ case "java.lang.String":
+ this.buffer = (U) maxString((String) buffer, (String) record);
+ break;
+ default:
+ // throw new Exception ?
+ break;
+ }
+ }
+
+ }
+
+ @Override
+ public void merge(U other) {
+ if (this.buffer == null && other != null) {
+ this.buffer = other;
+ return;
+ }
+
+ switch (buffer.getClass().getName()) {
+ case "java.util.concurrent.atomic.AtomicLong":
+ if (((AtomicLong) buffer).get() < ((AtomicLong) other).get()) {
+ ((AtomicLong) buffer).set(((AtomicLong) other).get());
+ }
+ break;
+ case "java.util.concurrent.atomic.AtomicInteger":
+ if (((AtomicInteger) buffer).get() < ((AtomicInteger) other).get()) {
+ ((AtomicInteger) buffer).set(((AtomicInteger) other).get());
+ }
+ break;
+ case "com.google.common.util.concurrent.AtomicDouble":
+ if (((AtomicDouble) buffer).get() < ((AtomicDouble) other).get()) {
+ ((AtomicDouble) buffer).set(((AtomicDouble) other).get());
+ }
+ break;
+ case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+ if (((AtomicFloat) buffer).compareTo(((AtomicFloat) other)) < 0) {
+ ((AtomicFloat) buffer).set(((AtomicFloat) other).get());
+ }
+ break;
+ case "java.lang.String":
+ this.buffer = (U) maxString((String) buffer, (String) other);
+ break;
+ default:
+ // throw new Exception ?
+ break;
+ }
+ }
+
+ /**
+ * Returns the lexicographically greater of two strings (NOT the longer one,
+ * as the original comment claimed); if either is null, returns the other.
+ *
+ * @param s1 first string
+ * @param s2 second string
+ * @return the greater string by String.compareTo order
+ */
+ private String maxString(String s1, String s2) {
+ if (s1 == null || s2 == null) {
+ return s1 == null ? s2 : s1;
+ }
+ return s1.compareTo(s2) >= 0 ? s1 : s2;
+ }
+
+ @Override
+ public T reduce() {
+ switch (buffer.getClass().getName()) {
+ case "java.util.concurrent.atomic.AtomicLong":
+ return (T) Long.valueOf(((AtomicLong) this.buffer).get());
+ case "java.util.concurrent.atomic.AtomicInteger":
+ return (T) Integer.valueOf(((AtomicInteger) this.buffer).get());
+ case "com.google.common.util.concurrent.AtomicDouble":
+ return (T) Double.valueOf(((AtomicDouble) this.buffer).get());
+ case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+ return (T) Float.valueOf(((AtomicFloat) this.buffer).get());
+ case "java.lang.String":
+ return (T) this.buffer;
+ default:
+ // throw new Exception ?
+ break;
+ }
+ return null;
+ }
+ }
+
+ public static class MinFunction extends UnaryAggregationFunction {
+
+ public MinFunction(Id field, Supplier supplier) {
+ super(field, supplier);
+ }
+
+ public MinFunction(Supplier supplier) {
+ super();
+ this.supplier = supplier;
+ this.buffer = initBuffer();
+ }
+
+ @Override
+ protected U initBuffer() {
+ return getInitValue(() -> new AtomicLong(Long.MAX_VALUE),
+ () -> new AtomicInteger(Integer.MAX_VALUE),
+ () -> new AtomicDouble(Double.MAX_VALUE),
+ () -> new AtomicFloat(Float.MAX_VALUE));
+ }
+
+        /**
+         * Folds a single record into the running minimum; null records are ignored.
+         *
+         * BUG FIX: the comparisons were copied from MaxFunction and used '<'.
+         * Since initBuffer() seeds the buffer with MAX_VALUE, the buffer was never
+         * updated and MIN always returned the seed. A min-aggregation must replace
+         * the buffer only when the incoming record is SMALLER.
+         */
+        @Override
+        public void iterate(T record) {
+            if (record != null) {
+                // String buffers are created lazily from the first record
+                if (this.buffer == null) {
+                    this.buffer = (U) record;
+                    return;
+                }
+
+                switch (buffer.getClass().getName()) {
+                    case "java.util.concurrent.atomic.AtomicLong":
+                        if (((AtomicLong) buffer).get() > (long) record) {
+                            ((AtomicLong) buffer).set((long) record);
+                        }
+                        break;
+                    case "java.util.concurrent.atomic.AtomicInteger":
+                        if (((AtomicInteger) buffer).get() > (int) record) {
+                            ((AtomicInteger) buffer).set((int) record);
+                        }
+                        break;
+                    case "com.google.common.util.concurrent.AtomicDouble":
+                        if (((AtomicDouble) buffer).get() > (double) record) {
+                            ((AtomicDouble) buffer).set((double) record);
+                        }
+                        break;
+                    case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+                        if (((AtomicFloat) buffer).get() > (float) record) {
+                            ((AtomicFloat) buffer).set((float) record);
+                        }
+                        break;
+                    case "java.lang.String":
+                        this.buffer = (U) minString((String) buffer, (String) record);
+                        break;
+                    default:
+                        // unknown buffer type: ignored, matching the other functions
+                        break;
+                }
+            }
+        }
+
+        /**
+         * Merges another partial-aggregation buffer into this one, keeping the
+         * smaller value; adopts the other buffer when this one is still null
+         * (lazy String case).
+         *
+         * BUG FIX: the comparisons were inverted (copied from MaxFunction),
+         * including the AtomicFloat compareTo check; the buffer must be replaced
+         * when the other side holds the SMALLER value.
+         */
+        @Override
+        public void merge(U other) {
+            if (this.buffer == null && other != null) {
+                this.buffer = other;
+                return;
+            }
+
+            switch (buffer.getClass().getName()) {
+                case "java.util.concurrent.atomic.AtomicLong":
+                    if (((AtomicLong) buffer).get() > ((AtomicLong) other).get()) {
+                        ((AtomicLong) buffer).set(((AtomicLong) other).get());
+                    }
+                    break;
+                case "java.util.concurrent.atomic.AtomicInteger":
+                    if (((AtomicInteger) buffer).get() > ((AtomicInteger) other).get()) {
+                        ((AtomicInteger) buffer).set(((AtomicInteger) other).get());
+                    }
+                    break;
+                case "com.google.common.util.concurrent.AtomicDouble":
+                    if (((AtomicDouble) buffer).get() > ((AtomicDouble) other).get()) {
+                        ((AtomicDouble) buffer).set(((AtomicDouble) other).get());
+                    }
+                    break;
+                case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+                    if (((AtomicFloat) buffer).compareTo((AtomicFloat) other) > 0) {
+                        ((AtomicFloat) buffer).set(((AtomicFloat) other).get());
+                    }
+                    break;
+                case "java.lang.String":
+                    this.buffer = (U) minString((String) buffer, (String) other);
+                    break;
+                default:
+                    // unknown buffer type: ignored, matching the other functions
+                    break;
+            }
+        }
+
+ /**
+ * Returns the lexicographically smaller of two strings; if either is null,
+ * returns the other.
+ *
+ * @param s1 first string to compare
+ * @param s2 second string to compare
+ * @return the smaller string by String.compareTo order
+ */
+ private String minString(String s1, String s2) {
+ if (s1 == null || s2 == null) {
+ return s1 == null ? s2 : s1;
+ }
+ return s1.compareTo(s2) <= 0 ? s1 : s2;
+ }
+
+        /**
+         * Produces the final minimum value from the buffer as the record type.
+         *
+         * BUG FIX: the original matched "java.lang.Float" — a class name the
+         * buffer can never have — and for the AtomicFloat case returned the raw
+         * AtomicFloat holder instead of its Float value; it also lacked the
+         * String case that MaxFunction.reduce() has.
+         */
+        @Override
+        public T reduce() {
+            switch (buffer.getClass().getName()) {
+                case "java.util.concurrent.atomic.AtomicLong":
+                    return (T) Long.valueOf(((AtomicLong) this.buffer).get());
+                case "java.util.concurrent.atomic.AtomicInteger":
+                    return (T) Integer.valueOf(((AtomicInteger) this.buffer).get());
+                case "com.google.common.util.concurrent.AtomicDouble":
+                    return (T) Double.valueOf(((AtomicDouble) this.buffer).get());
+                case "org.apache.hugegraph.store.query.concurrent.AtomicFloat":
+                    return (T) Float.valueOf(((AtomicFloat) this.buffer).get());
+                case "java.lang.String":
+                    return (T) this.buffer;
+                default:
+                    // unknown buffer type
+                    break;
+            }
+            return null;
+        }
+
+ }
+
+ public static class AvgFunction extends
+ AbstractAggregationFunction,
+ Double, Double> {
+
+ private final Class filedClassType;
+
+ public AvgFunction(Supplier supplier) {
+ createBuffer();
+ filedClassType = supplier.get().getClass();
+ }
+
+ public Class getFiledClassType() {
+ return filedClassType;
+ }
+
+ /**
+ * Creates the average-aggregation buffer: a tuple of (record count, running sum).
+ *
+ * @return tuple holding an AtomicLong count and an AtomicDouble sum
+ */
+ @Override
+ public Tuple2 createBuffer() {
+ this.buffer = new Tuple2<>(new AtomicLong(0), new AtomicDouble(0.0));
+ return this.buffer;
+ }
+
+ @Override
+ public void iterate(Double record) {
+ if (record != null) {
+ buffer.getV1().getAndAdd(1);
+ buffer.getV2().getAndAdd(record.doubleValue());
+ }
+ }
+
+ @Override
+ public void merge(Tuple2 other) {
+ buffer.getV1().getAndAdd(other.getV1().get());
+ buffer.getV2().getAndAdd(other.getV2().get());
+ }
+
+ @Override
+ public Double reduce() {
+ if (buffer.getV1().get() == 0) {
+ return Double.NaN;
+ }
+
+ return buffer.getV2().get() / buffer.getV1().get();
+ }
+ }
+
+ public static class CountFunction extends AbstractAggregationFunction {
+
+ public CountFunction() {
+ createBuffer();
+ }
+
+ @Override
+ public AtomicLong createBuffer() {
+ this.buffer = new AtomicLong();
+ return this.buffer;
+ }
+
+ @Override
+ public AtomicLong getBuffer() {
+ return this.buffer;
+ }
+
+ @Override
+ public void iterate(Long record) {
+ this.buffer.getAndIncrement();
+ }
+
+ @Override
+ public void merge(AtomicLong other) {
+ this.buffer.getAndAdd(other.get());
+ }
+
+ @Override
+ public Long reduce() {
+ return this.buffer.get();
+ }
+ }
+
+ /**
+ * No-op aggregation used when a GROUP BY query carries no aggregator:
+ * iterate/merge do nothing and reduce() yields null.
+ */
+ public static class EmptyFunction implements AggregationFunction {
+
+ @Override
+ public Integer createBuffer() {
+ return 0;
+ }
+
+ @Override
+ public Integer getBuffer() {
+ return 0;
+ }
+
+ @Override
+ public void iterate(Integer record) {
+
+ }
+
+ @Override
+ public void merge(Integer other) {
+
+ }
+
+ @Override
+ public Integer reduce() {
+ return null;
+ }
+ }
+
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/UnaryAggregationFunction.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/UnaryAggregationFunction.java
new file mode 100644
index 0000000000..a2c7737e40
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/func/UnaryAggregationFunction.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.func;
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.store.query.concurrent.AtomicFloat;
+
+import com.google.common.util.concurrent.AtomicDouble;
+
+/**
+ * base of max, min, sum. (input type equals output type)
+ *
+ * @param buffer type (using for concurrency)
+ * @param record type
+ */
+
+public abstract class UnaryAggregationFunction extends AbstractAggregationFunction {
+
+ /**
+ * create the buffer
+ */
+ protected Supplier supplier;
+
+ /**
+ * filed id
+ */
+ protected Id field;
+
+ /**
+ * type check, filed id and supplier should not be null
+ */
+ protected UnaryAggregationFunction() {
+
+ }
+
+ /**
+ * init the agg function. the generic info of java would be erased during compiling stage,
+ * the supplier is used to save the type info mostly.
+ *
+ * @param field the field of the element
+ * @param supplier use to create buffer.
+ */
+ public UnaryAggregationFunction(Id field, Supplier supplier) {
+ this.field = field;
+ this.supplier = supplier;
+ buffer = createBuffer();
+ }
+
+ public Id getFieldId() {
+ return field;
+ }
+
+ /**
+ * 创建一个新的缓冲区。
+ *
+ * @return 返回创建的新缓冲区。
+ */
+ @Override
+ public U createBuffer() {
+ return initBuffer();
+ }
+
+ protected abstract U initBuffer();
+
+ /**
+ * Picks the initial buffer instance matching the runtime class of the value
+ * produced by this function's supplier (the supplier preserves the field's
+ * element type, which generics erasure would otherwise lose).
+ *
+ * @param longSupplier supplier used when the field type is Long
+ * @param integerSupplier supplier used when the field type is Integer
+ * @param doubleSupplier supplier used when the field type is Double
+ * @param floatSupplier supplier used when the field type is Float
+ * @return the initial buffer; null for String (the buffer is then built
+ * lazily from the first record), or the supplier's own value for
+ * any other type
+ */
+ protected U getInitValue(Supplier longSupplier,
+ Supplier integerSupplier,
+ Supplier doubleSupplier,
+ Supplier floatSupplier) {
+ Object result;
+ var ins = this.supplier.get();
+ switch (ins.getClass().getName()) {
+ case "java.lang.Long":
+ result = longSupplier.get();
+ break;
+ case "java.lang.Integer":
+ result = integerSupplier.get();
+ break;
+ case "java.lang.Double":
+ result = doubleSupplier.get();
+ break;
+ case "java.lang.Float":
+ result = floatSupplier.get();
+ break;
+ case "java.lang.String":
+ result = null;
+ break;
+ default:
+ result = ins;
+ break;
+ }
+
+ return (U) result;
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/util/KeyUtil.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/util/KeyUtil.java
new file mode 100644
index 0000000000..1bb29cbf39
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/query/util/KeyUtil.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.query.util;
+
+import org.apache.hugegraph.backend.BinaryId;
+import org.apache.hugegraph.id.EdgeId;
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.id.IdUtil;
+import org.apache.hugegraph.serializer.BytesBuffer;
+import org.apache.hugegraph.store.constant.HugeServerTables;
+
+public class KeyUtil {
+
+ private static final byte[] EMPTY_BYTES = new byte[0];
+
+ /**
+ * Computes the partition-owner key using the same algorithm as hugegraph-server:
+ * for edge tables the key is parsed as an EdgeId and the serialized owner
+ * (source) vertex id is returned — presumably so an edge is colocated with its
+ * owner vertex's partition (TODO confirm against the server-side routing);
+ * for all other tables the key itself is the owner key.
+ *
+ * @param table name of the table the key belongs to
+ * @param key original key; may be null or empty
+ * @return the owner key bytes, or an empty array for a null/empty key
+ */
+ public static byte[] getOwnerKey(String table, byte[] key) {
+ if (key == null || key.length == 0) {
+ return EMPTY_BYTES;
+ }
+
+ if (HugeServerTables.isEdgeTable(table)) {
+ var id = (EdgeId) IdUtil.fromBytes(key);
+ return idToBytes(id.ownerVertexId());
+ }
+
+ return key;
+ }
+
+ public static byte[] getOwnerId(Id id) {
+ if (id instanceof BinaryId) {
+ id = ((BinaryId) id).origin();
+ }
+ if (id != null && id.edge()) {
+ id = ((EdgeId) id).ownerVertexId();
+ }
+ return id != null ? id.asBytes() : EMPTY_BYTES;
+
+ }
+
+ public static byte[] idToBytes(Id id) {
+ BytesBuffer buffer = BytesBuffer.allocate(1 + id.length());
+ buffer.writeId(id);
+ return buffer.bytes();
+ }
+
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/Base58Encoder.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/Base58Encoder.java
index 617f6dd28f..48be004de4 100644
--- a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/Base58Encoder.java
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/Base58Encoder.java
@@ -21,6 +21,7 @@
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
+@Deprecated
public class Base58Encoder {
public static final char[] CHAR_SET =
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/DefaultThreadFactory.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/DefaultThreadFactory.java
new file mode 100644
index 0000000000..50c347c212
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/DefaultThreadFactory.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.util;
+
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/6/13
+ **/
+public class DefaultThreadFactory implements ThreadFactory {
+
+ private final AtomicInteger number = new AtomicInteger(1);
+ private final String namePrefix;
+ private final boolean daemon;
+
+ public DefaultThreadFactory(String prefix, boolean daemon) {
+ this.namePrefix = prefix + "-";
+ this.daemon = daemon;
+ }
+
+ public DefaultThreadFactory(String prefix) {
+ this(prefix, true);
+ }
+
+ @Override
+ public Thread newThread(Runnable r) {
+ Thread t = new Thread(null, r, namePrefix + number.getAndIncrement(), 0);
+ t.setDaemon(daemon);
+ t.setPriority(Thread.NORM_PRIORITY);
+ return t;
+ }
+}
diff --git a/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/ExecutorUtil.java b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/ExecutorUtil.java
new file mode 100644
index 0000000000..ab972c43c7
--- /dev/null
+++ b/hugegraph-store/hg-store-common/src/main/java/org/apache/hugegraph/store/util/ExecutorUtil.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.store.util;
+
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.SynchronousQueue;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+public final class ExecutorUtil {
+
+ private static final Map pools = new ConcurrentHashMap<>();
+
+ public static ThreadPoolExecutor getThreadPoolExecutor(String name) {
+ if (name == null) {
+ return null;
+ }
+ return pools.get(name);
+ }
+
+ public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads,
+ int queueSize) {
+
+ return createExecutor(name, coreThreads, maxThreads, queueSize, true);
+ }
+
+ public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads,
+ int queueSize, boolean daemon) {
+ ThreadPoolExecutor res = pools.get(name);
+ if (res != null) {
+ return res;
+ }
+ synchronized (pools) {
+ res = pools.get(name);
+ if (res != null) {
+ return res;
+ }
+ BlockingQueue queue;
+ if (queueSize <= 0) {
+ queue = new SynchronousQueue();
+ } else {
+ queue = new LinkedBlockingQueue<>(queueSize);
+ }
+ res = new ThreadPoolExecutor(coreThreads, maxThreads, 60L, TimeUnit.SECONDS, queue,
+ new DefaultThreadFactory(name, daemon));
+ pools.put(name, res);
+ }
+ return res;
+ }
+}
From 7a3e8fc10c01b384563b41b847d037b5c196e92f Mon Sep 17 00:00:00 2001
From: JisoLya <53420504@qq.com>
Date: Fri, 18 Jul 2025 21:23:02 +0800
Subject: [PATCH 03/35] refactor grpc module
---
.../hg-store-grpc/src/main/proto/query.proto | 122 ++++++++++++++++++
.../src/main/proto/store_common.proto | 7 +
.../src/main/proto/store_session.proto | 6 -
.../src/main/proto/store_state.proto | 9 +-
hugegraph-store/pom.xml | 5 +
5 files changed, 142 insertions(+), 7 deletions(-)
create mode 100644 hugegraph-store/hg-store-grpc/src/main/proto/query.proto
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/query.proto b/hugegraph-store/hg-store-grpc/src/main/proto/query.proto
new file mode 100644
index 0000000000..fe8b963bf1
--- /dev/null
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/query.proto
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+import "store_common.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.store.grpc.query";
+option java_outer_classname = "AggPushDownQueryProto";
+
+service QueryService {
+ rpc query(stream QueryRequest) returns (stream QueryResponse) {}
+ // rpc close(google.protobuf.StringValue) returns(google.protobuf.BoolValue) {}
+ // 简单的查询
+ rpc query0(QueryRequest) returns (QueryResponse) {}
+ rpc count(QueryRequest) returns (QueryResponse) {}
+}
+
+enum AggregationType {
+ COUNT = 0;
+ SUM = 1;
+ MIN = 2;
+ MAX = 3;
+ AVG = 4;
+}
+
+message AggregateFunc {
+ AggregationType funcType = 1;
+ bytes field = 2; // count 函数不检查,设置为-1, property id
+ string type = 3; // 初始化buffer类型
+}
+
+enum ScanType {
+ TABLE_SCAN = 0;
+ PRIMARY_SCAN = 1;
+ INDEX_SCAN = 2;
+ NO_SCAN = 3; // 只扫索引
+}
+
+message ScanTypeParam {
+ bytes key_start = 1;
+ bytes key_end = 2;
+ int32 scan_boundary = 3; // range boundary
+ bool is_prefix = 4; // 区分id和prefix
+ bool is_secondary_index = 5; // 区分primary scan or index scan
+ int32 code = 6; // id code
+ bytes id_prefix = 7; // check the element id prefix when parse index
+}
+
+message Index {
+ repeated ScanTypeParam params = 1;
+}
+
+enum DeDupOption {
+ NONE = 0;
+ DEDUP = 1;
+ LIMIT_DEDUP = 2;
+ PRECISE_DEDUP = 3;
+}
+
+message QueryRequest{
+ string queryId = 1;
+ string graph = 2;
+ string table = 3;
+
+ repeated AggregateFunc functions = 4;
+ // 属性剪裁,如果为空,则返回所有的属性, aggregation 作为单独字段,不包含此列
+ // 如果有group by,应该是group by的子集
+ repeated bytes property = 5;
+ repeated bytes group_by = 6; // group by的字段
+ repeated uint32 having = 7; // having 的过滤 (暂不实现),
+ repeated bytes order_by = 8; // order by 字段
+ bool sort_order = 9; // asc or desc
+ bool null_property = 10; // 不使用property,仅仅返回key
+
+ ScanType scan_type = 11; // 表扫描类型, 如果有索引,此项忽略
+
+ repeated ScanTypeParam scan_type_param = 12; // id, prefix 只用到start
+
+ DeDupOption dedup_option = 13; // 是否需要key消重
+
+ bytes condition = 21; // condition
+ bytes position = 24; // 返回offset ~ offset + limit
+ uint32 limit = 23; // page
+ uint32 offset = 25; // offset
+
+ double sample_factor = 31; // 抽样频率,应该小于等于1
+
+ repeated bytes olap_property = 32; // 读取的olap 属性
+
+ // 使用的索引, 第一层为or关系,第二层为 and关系
+ // indexes ((index,index) or (index, index))
+ repeated Index indexes = 41;
+
+ bool load_property_from_index = 42;
+ bool check_ttl = 43;
+ // 按照element的 label id group by
+ bool group_by_schema_label = 44;
+}
+
+message QueryResponse {
+ string query_id = 1;
+ bool is_ok = 2;
+ bool is_finished = 3;
+ string message = 4;
+ repeated Kv data = 5;
+}
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
index bc45670198..06d161c70f 100644
--- a/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_common.proto
@@ -111,3 +111,10 @@ enum GraphMethod{
GRAPH_METHOD_UNKNOWN = 0;
GRAPH_METHOD_DELETE = 3;
}
+
+message TTLCleanRequest {
+ string graph = 1;
+ int32 partitionId = 2;
+ string table = 3;
+ repeated bytes ids = 4;
+}
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
index e9cb940881..483a7f1ef5 100644
--- a/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_session.proto
@@ -22,7 +22,6 @@ option java_package = "org.apache.hugegraph.store.grpc.session";
option java_outer_classname = "HgStoreSessionProto";
import "store_common.proto";
-import "store_stream_meta.proto";
service HgStoreSession {
rpc Get2(GetReq) returns (FeedbackRes) {}
@@ -31,7 +30,6 @@ service HgStoreSession {
rpc Table(TableReq) returns (FeedbackRes){};
rpc Graph(GraphReq) returns (FeedbackRes){};
rpc Clean(CleanReq) returns (FeedbackRes) {}
- rpc Count(ScanStreamReq) returns (Agg) {}
}
message TableReq{
@@ -130,7 +128,3 @@ enum PartitionFaultType{
PARTITION_FAULT_TYPE_NOT_LOCAL = 3;
}
-message Agg {
- Header header = 1;
- int64 count = 2;
-}
diff --git a/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto b/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto
index d2b0aa3613..50671753f5 100644
--- a/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto
+++ b/hugegraph-store/hg-store-grpc/src/main/proto/store_state.proto
@@ -32,7 +32,7 @@ service HgStoreState {
// Unsubscribe Store Node state publishing.
rpc UnsubState(SubStateReq) returns (google.protobuf.Empty){}
rpc getScanState(SubStateReq) returns (ScanState){}
-
+ rpc getPeers(PartitionRequest) returns (PeersResponse){}
}
message SubStateReq{
@@ -71,3 +71,10 @@ enum NodeStateType {
message QuotaRequest {
map limits = 1;
}
+
+message PartitionRequest{
+ int32 id = 1;
+}
+message PeersResponse{
+ string peers = 1;
+}
diff --git a/hugegraph-store/pom.xml b/hugegraph-store/pom.xml
index f9cd0bcfb3..d25db72216 100644
--- a/hugegraph-store/pom.xml
+++ b/hugegraph-store/pom.xml
@@ -75,6 +75,11 @@
hg-store-core
${project.version}
+
+ org.apache
+ hugegraph-struct
+ ${project.version}
+
org.apache.hugegraph
hg-store-transfer
From 0de281738c956138e85dde7314ca58e7529e4961 Mon Sep 17 00:00:00 2001
From: JisoLya <53420504@qq.com>
Date: Sat, 19 Jul 2025 18:00:48 +0800
Subject: [PATCH 04/35] refactor rocksDB module
---
.../rocksdb/access/RocksDBFactory.java | 56 +++++---
.../rocksdb/access/RocksDBOptions.java | 32 +----
.../rocksdb/access/RocksDBScanIterator.java | 7 +-
.../rocksdb/access/RocksDBSession.java | 34 ++++-
.../rocksdb/access/SessionOperatorImpl.java | 122 +++++++++++-------
5 files changed, 149 insertions(+), 102 deletions(-)
diff --git a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
index ce5dc665a6..2e8e0bae68 100644
--- a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
+++ b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBFactory.java
@@ -29,6 +29,7 @@
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
@@ -58,11 +59,28 @@ public final class RocksDBFactory {
private final ReentrantReadWriteLock operateLock;
ScheduledExecutorService scheduledExecutor;
private HugeConfig hugeConfig;
+ private AtomicBoolean closing = new AtomicBoolean(false);
private RocksDBFactory() {
this.operateLock = new ReentrantReadWriteLock();
scheduledExecutor = Executors.newScheduledThreadPool(2);
scheduledExecutor.scheduleWithFixedDelay(() -> {
+ try {
+ dbSessionMap.forEach((k, session) -> {
+ for (var entry : session.getIteratorMap().entrySet()) {
+ String key = entry.getKey();
+ var ts = Long.parseLong(key.split("-")[0]);
+ // output once per 10min
+ var passed = (System.currentTimeMillis() - ts) / 1000 - 600;
+ if (passed > 0 && passed % 10 == 0) {
+ log.info("iterator not close, stack: {}", entry.getValue());
+ }
+ }
+ });
+ } catch (Exception e) {
+ log.error("got error, ", e);
+ }
+
try {
Iterator itr = destroyGraphDBs.listIterator();
while (itr.hasNext()) {
@@ -146,12 +164,30 @@ public RocksDBSession queryGraphDB(String dbName) {
}
return null;
}
+ //TODO is this necessary?
+ class RocksdbEventListener extends AbstractEventListener {
+ @Override
+ public void onCompactionCompleted(RocksDB db, CompactionJobInfo compactionJobInfo) {
+ super.onCompactionCompleted(db, compactionJobInfo);
+ rocksdbChangedListeners.forEach(listener -> {
+ listener.onCompacted(db.getName());
+ });
+ }
+
+ @Override
+ public void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo) {
+ log.info("RocksdbEventListener onCompactionBegin");
+ }
+ }
public RocksDBSession createGraphDB(String dbPath, String dbName) {
return createGraphDB(dbPath, dbName, 0);
}
public RocksDBSession createGraphDB(String dbPath, String dbName, long version) {
+ if (closing.get()) {
+ throw new RuntimeException("db closed");
+ }
operateLock.writeLock().lock();
try {
RocksDBSession dbSession = dbSessionMap.get(dbName);
@@ -231,7 +267,8 @@ public void destroyGraphDB(String dbName) {
}
public void releaseAllGraphDB() {
- log.info("close all rocksdb.");
+ closing.set(true);
+ log.info("closing all rocksdb....");
operateLock.writeLock().lock();
try {
dbSessionMap.forEach((k, v) -> {
@@ -292,24 +329,7 @@ default void onDBSessionReleased(RocksDBSession dbSession) {
}
}
- class RocksdbEventListener extends AbstractEventListener {
-
- @Override
- public void onCompactionCompleted(RocksDB db, CompactionJobInfo compactionJobInfo) {
- super.onCompactionCompleted(db, compactionJobInfo);
- rocksdbChangedListeners.forEach(listener -> {
- listener.onCompacted(db.getName());
- });
- }
-
- @Override
- public void onCompactionBegin(final RocksDB db, final CompactionJobInfo compactionJobInfo) {
- log.info("RocksdbEventListener onCompactionBegin");
- }
- }
-
class DBSessionWatcher {
-
public RocksDBSession dbSession;
public Long timestamp;
diff --git a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBOptions.java b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBOptions.java
index 6f5c35f627..7fcd07f3b8 100644
--- a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBOptions.java
+++ b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBOptions.java
@@ -23,8 +23,6 @@
import static org.apache.hugegraph.config.OptionChecker.rangeDouble;
import static org.apache.hugegraph.config.OptionChecker.rangeInt;
-import java.util.Map;
-
import org.apache.hugegraph.config.ConfigConvOption;
import org.apache.hugegraph.config.ConfigListConvOption;
import org.apache.hugegraph.config.ConfigOption;
@@ -32,7 +30,6 @@
import org.apache.hugegraph.util.Bytes;
import org.rocksdb.CompactionStyle;
import org.rocksdb.CompressionType;
-import org.rocksdb.InfoLogLevel;
public class RocksDBOptions extends OptionHolder {
@@ -91,13 +88,6 @@ public class RocksDBOptions extends OptionHolder {
allowValues("DEBUG", "INFO", "WARN", "ERROR", "FATAL", "HEADER"),
"INFO"
);
- public static final Map LOG_LEVEL_MAPPING =
- Map.of("DEBUG", InfoLogLevel.DEBUG_LEVEL,
- "INFO", InfoLogLevel.INFO_LEVEL,
- "WARN", InfoLogLevel.WARN_LEVEL,
- "ERROR", InfoLogLevel.ERROR_LEVEL,
- "FATAL", InfoLogLevel.FATAL_LEVEL,
- "HEADER", InfoLogLevel.HEADER_LEVEL);
public static final ConfigOption NUM_LEVELS =
new ConfigOption<>(
@@ -106,27 +96,7 @@ public class RocksDBOptions extends OptionHolder {
rangeInt(1, Integer.MAX_VALUE),
7
);
- public static final ConfigOption BLOCK_CACHE_CAPACITY =
- new ConfigOption<>(
- "rocksdb.block_cache_capacity",
- "The amount of block cache in bytes that will be used by all RocksDBs",
- rangeInt(0L, Long.MAX_VALUE),
- 16L * Bytes.GB
- );
- public static final ConfigOption SNAPSHOT_PATH =
- new ConfigOption<>(
- "rocksdb.snapshot_path",
- "The path for storing snapshot of RocksDB.",
- disallowEmpty(),
- "rocksdb-snapshot"
- );
- public static final ConfigOption DISABLE_AUTO_COMPACTION =
- new ConfigOption<>(
- "rocksdb.disable_auto_compaction",
- "Set disable auto compaction.",
- disallowEmpty(),
- false
- );
+
public static final ConfigConvOption COMPACTION_STYLE =
new ConfigConvOption<>(
"rocksdb.compaction_style",
diff --git a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBScanIterator.java b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBScanIterator.java
index ff255d9ea9..dca8179308 100644
--- a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBScanIterator.java
+++ b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBScanIterator.java
@@ -20,6 +20,7 @@
import java.util.Arrays;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.Consumer;
import org.apache.hugegraph.rocksdb.access.RocksDBSession.BackendColumn;
import org.apache.hugegraph.util.Bytes;
@@ -39,11 +40,13 @@ public class RocksDBScanIterator implements ScanIterator {
private final AtomicBoolean closed = new AtomicBoolean(false);
private final RocksDBSession.RefCounter iterReference;
+ private final Consumer closeOp;
private byte[] key;
private boolean matched;
public RocksDBScanIterator(RocksIterator rawIt, byte[] keyBegin, byte[] keyEnd,
- int scanType, RocksDBSession.RefCounter iterReference) {
+ int scanType, RocksDBSession.RefCounter iterReference,
+ Consumer closeOp) {
this.rawIt = rawIt;
this.keyBegin = keyBegin;
this.keyEnd = keyEnd;
@@ -52,6 +55,7 @@ public RocksDBScanIterator(RocksIterator rawIt, byte[] keyBegin, byte[] keyEnd,
this.key = keyBegin;
this.matched = false;
this.iterReference = iterReference;
+ this.closeOp = closeOp;
this.seek();
}
@@ -226,6 +230,7 @@ public void close() {
if (this.rawIt.isOwningHandle()) {
this.rawIt.close();
}
+ this.closeOp.accept(true);
this.iterReference.release();
}
}
diff --git a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
index c3356de248..9c3005da66 100644
--- a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
+++ b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/RocksDBSession.java
@@ -24,6 +24,7 @@
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -50,6 +51,7 @@
import org.rocksdb.DBOptionsInterface;
import org.rocksdb.Env;
import org.rocksdb.FlushOptions;
+import org.rocksdb.InfoLogLevel;
import org.rocksdb.IngestExternalFileOptions;
import org.rocksdb.MutableColumnFamilyOptionsInterface;
import org.rocksdb.MutableDBOptionsInterface;
@@ -63,6 +65,7 @@
import org.rocksdb.WriteBufferManager;
import org.rocksdb.WriteOptions;
+import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
@Slf4j
@@ -83,8 +86,10 @@ public class RocksDBSession implements AutoCloseable, Cloneable {
private DBOptions dbOptions;
private volatile boolean closed = false;
- public RocksDBSession(HugeConfig hugeConfig, String dbDataPath, String graphName,
- long version) {
+ @Getter
+ private Map iteratorMap;
+
+ public RocksDBSession(HugeConfig hugeConfig, String dbDataPath, String graphName, long version) {
this.hugeConfig = hugeConfig;
this.graphName = graphName;
this.cfHandleLock = new ReentrantReadWriteLock();
@@ -93,6 +98,7 @@ public RocksDBSession(HugeConfig hugeConfig, String dbDataPath, String graphName
this.shutdown = new AtomicBoolean(false);
this.writeOptions = new WriteOptions();
this.rocksDbStats = new Statistics();
+ this.iteratorMap = new ConcurrentHashMap<>();
openRocksDB(dbDataPath, version);
}
@@ -107,6 +113,7 @@ private RocksDBSession(RocksDBSession origin) {
this.writeOptions = origin.writeOptions;
this.rocksDbStats = origin.rocksDbStats;
this.shutdown = origin.shutdown;
+ this.iteratorMap = origin.iteratorMap;
this.refCount = origin.refCount;
this.refCount.incrementAndGet();
}
@@ -143,8 +150,8 @@ public static void initOptions(HugeConfig conf,
db.setAllowConcurrentMemtableWrite(true);
db.setEnableWriteThreadAdaptiveYield(true);
}
- db.setInfoLogLevel(
- RocksDBOptions.LOG_LEVEL_MAPPING.get(conf.get(RocksDBOptions.LOG_LEVEL)));
+ db.setInfoLogLevel(InfoLogLevel.valueOf(
+ conf.get(RocksDBOptions.LOG_LEVEL) + "_LEVEL"));
db.setMaxSubcompactions(conf.get(RocksDBOptions.MAX_SUB_COMPACTIONS));
db.setAllowMmapWrites(conf.get(RocksDBOptions.ALLOW_MMAP_WRITES));
db.setAllowMmapReads(conf.get(RocksDBOptions.ALLOW_MMAP_READS));
@@ -430,9 +437,6 @@ private void openRocksDB(String dbDataPath, long version) {
List columnFamilyBytes = RocksDB.listColumnFamilies(new Options(), dbPath);
ColumnFamilyOptions cfOptions = new ColumnFamilyOptions();
- if (hugeConfig.get(RocksDBOptions.DISABLE_AUTO_COMPACTION)) {
- cfOptions.setDisableAutoCompactions(true);
- }
RocksDBSession.initOptions(this.hugeConfig, null, null, cfOptions, cfOptions);
if (columnFamilyBytes.size() > 0) {
@@ -1055,4 +1059,20 @@ public void release() {
}
}
}
+
+ public static String stackToString() {
+ return Arrays.stream(Thread.currentThread().getStackTrace())
+ .map(StackTraceElement::toString)
+ .collect(Collectors.joining("\n\t"));
+ }
+
+ public void addIterator(String key, ScanIterator iterator) {
+ log.debug("add iterator, key {}", key);
+ this.iteratorMap.put(key, stackToString());
+ }
+
+ public void removeIterator(String key) {
+ log.debug("remove iterator key, {}", key);
+ this.iteratorMap.remove(key);
+ }
}
diff --git a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
index 0dd58dc7b7..d8e668391e 100644
--- a/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
+++ b/hugegraph-store/hg-store-rocksdb/src/main/java/org/apache/hugegraph/rocksdb/access/SessionOperatorImpl.java
@@ -20,6 +20,7 @@
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Iterator;
+import java.util.Random;
import org.apache.hugegraph.rocksdb.access.RocksDBSession.CFHandleLock;
import org.apache.hugegraph.rocksdb.access.util.Asserts;
@@ -264,9 +265,18 @@ public ScanIterator scan(String tableName) {
log.info("no find table : {}", tableName);
return null;
}
- return new RocksDBScanIterator(this.rocksdb().newIterator(handle.get()), null, null,
- ScanIterator.Trait.SCAN_ANY,
- this.session.getRefCounter());
+ String key = getIteratorKey();
+
+ var iterator =
+ new RocksDBScanIterator(
+ this.rocksdb().newIterator(handle.get()),
+ null,
+ null,
+ ScanIterator.Trait.SCAN_ANY,
+ this.session.getRefCounter(),
+ b -> session.removeIterator(key));
+ this.session.addIterator(key, iterator);
+ return iterator;
}
}
@@ -283,9 +293,17 @@ public ScanIterator scan(String tableName, byte[] prefix, int scanType) {
new String(prefix));
return null;
}
- return new RocksDBScanIterator(this.rocksdb().newIterator(handle.get()), prefix, null,
- ScanIterator.Trait.SCAN_PREFIX_BEGIN | scanType,
- this.session.getRefCounter());
+ String key = getIteratorKey();
+ var iterator =
+ new RocksDBScanIterator(
+ this.rocksdb().newIterator(handle.get()),
+ prefix,
+ null,
+ ScanIterator.Trait.SCAN_PREFIX_BEGIN | scanType,
+ this.session.getRefCounter(),
+ b -> session.removeIterator(key));
+ this.session.addIterator(key, iterator);
+ return iterator;
}
}
@@ -296,9 +314,17 @@ public ScanIterator scan(String tableName, byte[] keyFrom, byte[] keyTo, int sca
log.info("no find table: {} for scantype: {}", tableName, scanType);
return null;
}
- return new RocksDBScanIterator(this.rocksdb().newIterator(handle.get()), keyFrom, keyTo,
- scanType,
- this.session.getRefCounter());
+ String key = getIteratorKey();
+ var iterator =
+ new RocksDBScanIterator(
+ this.rocksdb().newIterator(handle.get()),
+ keyFrom,
+ keyTo,
+ scanType,
+ this.session.getRefCounter(),
+ b -> session.removeIterator(key));
+ this.session.addIterator(key, iterator);
+ return iterator;
}
}
@@ -344,53 +370,55 @@ public T next() {
iterator.seekToFirst();
}
}
- if (iterator == null) {
- return null;
- }
- RocksIterator finalIterator = iterator;
- return (T) new ScanIterator() {
- private final ReadOptions holdReadOptions = readOptions;
-
- @Override
- public boolean hasNext() {
- return finalIterator.isValid();
- }
+ if (iterator == null) return null;
+ String key = getIteratorKey();
+ var newIterator = getScanRawIterator(iterator, readOptions, startSeqNum, key);
+ session.addIterator(key, newIterator);
+ return (T) newIterator;
+ }
- @Override
- public boolean isValid() {
- return finalIterator.isValid();
- }
+ @Override
+ public void close() {
+ rocksdb().releaseSnapshot(snapshot);
+ }
- @Override
- public T next() {
- byte[] key = finalIterator.key();
- if (startSeqNum > 0) {
- key = Arrays.copyOfRange(key, 0, key.length - kNumInternalBytes);
- }
- RocksDBSession.BackendColumn col =
- RocksDBSession.BackendColumn.of(key, finalIterator.value());
- finalIterator.next();
- return (T) col;
- }
+ public byte[] position() {
+ return cfName.getBytes(StandardCharsets.UTF_8);
+ }
+ };
+ }
- @Override
- public void close() {
- finalIterator.close();
- holdReadOptions.close();
- }
+ private ScanIterator getScanRawIterator(RocksIterator iterator, ReadOptions readOptions,
+ long startSeqNum, String key) {
+ int kNumInternalBytes = 8; // internal key new 8 bytes suffix
- };
+ return new ScanIterator() {
+ @Override
+ public boolean hasNext() {
+ return iterator.isValid();
}
@Override
- public void close() {
- rocksdb().releaseSnapshot(snapshot);
+ public boolean isValid() {
+ return iterator.isValid();
}
@Override
- public byte[] position() {
- return cfName.getBytes(StandardCharsets.UTF_8);
+ public T next() {
+ byte[] key = iterator.key();
+ if (startSeqNum > 0) {
+ key = Arrays.copyOfRange(key, 0, key.length - kNumInternalBytes);
+ }
+ var col = RocksDBSession.BackendColumn.of(key, iterator.value());
+ iterator.next();
+ return (T) col;
+ }
+ @Override
+ public void close() {
+ iterator.close();
+ readOptions.close();
+ session.removeIterator(key);
}
};
}
@@ -418,4 +446,8 @@ private WriteBatch getBatch() {
}
return this.batch;
}
+
+ private String getIteratorKey() {
+ return System.currentTimeMillis() + "-" + (new Random()).nextLong();
+ }
}
From 928b322591ded9a8588225400602a0c05b0ef8f1 Mon Sep 17 00:00:00 2001
From: JisoLya <523420504@qq.com>
Date: Sun, 20 Jul 2025 16:15:24 +0800
Subject: [PATCH 05/35] add struct module
---
hugegraph-struct/pom.xml | 197 +++
.../apache/hugegraph/HugeGraphSupplier.java | 79 ++
.../org/apache/hugegraph/SchemaDriver.java | 860 ++++++++++++
.../org/apache/hugegraph/SchemaGraph.java | 182 +++
.../apache/hugegraph/analyzer/Analyzer.java | 27 +
.../hugegraph/analyzer/AnalyzerFactory.java | 102 ++
.../hugegraph/analyzer/AnsjAnalyzer.java | 87 ++
.../hugegraph/analyzer/HanLPAnalyzer.java | 108 ++
.../apache/hugegraph/analyzer/IKAnalyzer.java | 73 +
.../hugegraph/analyzer/JcsegAnalyzer.java | 77 +
.../hugegraph/analyzer/JiebaAnalyzer.java | 63 +
.../hugegraph/analyzer/MMSeg4JAnalyzer.java | 92 ++
.../hugegraph/analyzer/SmartCNAnalyzer.java | 66 +
.../hugegraph/analyzer/WordAnalyzer.java | 74 +
.../apache/hugegraph/auth/AuthConstant.java | 30 +
.../apache/hugegraph/auth/TokenGenerator.java | 70 +
.../hugegraph/backend/BackendColumn.java | 69 +
.../apache/hugegraph/backend/BinaryId.java | 103 ++
.../org/apache/hugegraph/backend/Shard.java | 71 +
.../hugegraph/exception/BackendException.java | 53 +
.../exception/ErrorCodeProvider.java | 27 +
.../hugegraph/exception/HugeException.java | 70 +
.../exception/LimitExceedException.java | 33 +
.../exception/NotAllowException.java | 33 +
.../exception/NotFoundException.java | 37 +
.../exception/NotSupportException.java | 34 +
.../java/org/apache/hugegraph/id/EdgeId.java | 350 +++++
.../main/java/org/apache/hugegraph/id/Id.java | 90 ++
.../org/apache/hugegraph/id/IdGenerator.java | 465 +++++++
.../java/org/apache/hugegraph/id/IdUtil.java | 162 +++
.../hugegraph/id/SplicingIdGenerator.java | 150 ++
.../apache/hugegraph/options/AuthOptions.java | 153 ++
.../apache/hugegraph/options/CoreOptions.java | 715 ++++++++++
.../org/apache/hugegraph/query/Aggregate.java | 61 +
.../hugegraph/query/AggregateFuncDefine.java | 33 +
.../org/apache/hugegraph/query/Condition.java | 1045 ++++++++++++++
.../hugegraph/query/ConditionQuery.java | 1239 +++++++++++++++++
.../org/apache/hugegraph/query/IdQuery.java | 127 ++
.../apache/hugegraph/query/MatchedIndex.java | 81 ++
.../org/apache/hugegraph/query/Query.java | 720 ++++++++++
.../serializer/AbstractSerializerAdapter.java | 62 +
.../query/serializer/QueryAdapter.java | 148 ++
.../query/serializer/QueryIdAdapter.java | 46 +
.../apache/hugegraph/schema/EdgeLabel.java | 449 ++++++
.../apache/hugegraph/schema/IndexLabel.java | 498 +++++++
.../apache/hugegraph/schema/PropertyKey.java | 646 +++++++++
.../hugegraph/schema/SchemaElement.java | 259 ++++
.../apache/hugegraph/schema/SchemaLabel.java | 204 +++
.../org/apache/hugegraph/schema/Userdata.java | 64 +
.../apache/hugegraph/schema/VertexLabel.java | 414 ++++++
.../schema/builder/SchemaBuilder.java | 42 +
.../serializer/BinaryElementSerializer.java | 536 +++++++
.../hugegraph/serializer/BytesBuffer.java | 1012 ++++++++++++++
.../serializer/DirectBinarySerializer.java | 128 ++
.../apache/hugegraph/structure/BaseEdge.java | 288 ++++
.../hugegraph/structure/BaseElement.java | 355 +++++
.../hugegraph/structure/BaseProperty.java | 68 +
.../hugegraph/structure/BaseRawElement.java | 57 +
.../hugegraph/structure/BaseVertex.java | 168 +++
.../org/apache/hugegraph/structure/Index.java | 334 +++++
.../apache/hugegraph/structure/KvElement.java | 101 ++
.../structure/builder/IndexBuilder.java | 327 +++++
.../org/apache/hugegraph/type/GraphType.java | 23 +
.../org/apache/hugegraph/type/HugeType.java | 213 +++
.../org/apache/hugegraph/type/Idfiable.java | 27 +
.../apache/hugegraph/type/Indexfiable.java | 29 +
.../org/apache/hugegraph/type/Namifiable.java | 30 +
.../org/apache/hugegraph/type/Propfiable.java | 29 +
.../org/apache/hugegraph/type/Typifiable.java | 26 +
.../apache/hugegraph/type/define/Action.java | 76 +
.../hugegraph/type/define/AggregateType.java | 93 ++
.../hugegraph/type/define/Cardinality.java | 69 +
.../hugegraph/type/define/CollectionType.java | 68 +
.../hugegraph/type/define/DataType.java | 224 +++
.../hugegraph/type/define/Directions.java | 89 ++
.../hugegraph/type/define/EdgeLabelType.java | 72 +
.../hugegraph/type/define/Frequency.java | 51 +
.../hugegraph/type/define/HugeKeys.java | 108 ++
.../hugegraph/type/define/IdStrategy.java | 71 +
.../hugegraph/type/define/IndexType.java | 122 ++
.../hugegraph/type/define/SchemaStatus.java | 67 +
.../hugegraph/type/define/SerialEnum.java | 83 ++
.../hugegraph/type/define/WriteType.java | 67 +
.../java/org/apache/hugegraph/util/Blob.java | 73 +
.../org/apache/hugegraph/util/GraphUtils.java | 34 +
.../org/apache/hugegraph/util/LZ4Util.java | 95 ++
.../apache/hugegraph/util/StringEncoding.java | 203 +++
.../util/collection/CollectionFactory.java | 264 ++++
.../hugegraph/util/collection/IdSet.java | 120 ++
89 files changed, 16740 insertions(+)
create mode 100644 hugegraph-struct/pom.xml
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/HugeGraphSupplier.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaDriver.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaGraph.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/Analyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnalyzerFactory.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnsjAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/HanLPAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/IKAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JcsegAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JiebaAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/MMSeg4JAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/SmartCNAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/WordAnalyzer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/auth/AuthConstant.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/auth/TokenGenerator.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BackendColumn.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BinaryId.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/backend/Shard.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/BackendException.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/ErrorCodeProvider.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/HugeException.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/LimitExceedException.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotAllowException.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotFoundException.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotSupportException.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/id/EdgeId.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/id/Id.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdGenerator.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdUtil.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/id/SplicingIdGenerator.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/options/AuthOptions.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/options/CoreOptions.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/Aggregate.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/AggregateFuncDefine.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/Condition.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/ConditionQuery.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/IdQuery.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/MatchedIndex.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/Query.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/serializer/AbstractSerializerAdapter.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/serializer/QueryAdapter.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/query/serializer/QueryIdAdapter.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/EdgeLabel.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/IndexLabel.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/PropertyKey.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/SchemaElement.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/SchemaLabel.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/Userdata.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/VertexLabel.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/schema/builder/SchemaBuilder.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/serializer/BinaryElementSerializer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/serializer/BytesBuffer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/serializer/DirectBinarySerializer.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/BaseEdge.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/BaseElement.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/BaseProperty.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/BaseRawElement.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/BaseVertex.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/Index.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/KvElement.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/structure/builder/IndexBuilder.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/GraphType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/HugeType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/Idfiable.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/Indexfiable.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/Namifiable.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/Propfiable.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/Typifiable.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/Action.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/AggregateType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/Cardinality.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/CollectionType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/DataType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/Directions.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/EdgeLabelType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/Frequency.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/HugeKeys.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/IdStrategy.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/IndexType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/SchemaStatus.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/SerialEnum.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/type/define/WriteType.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/util/Blob.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/util/GraphUtils.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/util/LZ4Util.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/util/StringEncoding.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/util/collection/CollectionFactory.java
create mode 100644 hugegraph-struct/src/main/java/org/apache/hugegraph/util/collection/IdSet.java
diff --git a/hugegraph-struct/pom.xml b/hugegraph-struct/pom.xml
new file mode 100644
index 0000000000..dc9759bda5
--- /dev/null
+++ b/hugegraph-struct/pom.xml
@@ -0,0 +1,197 @@
+
+
+
+ 4.0.0
+
+ hugegraph-struct
+
+
+ org.apache.hugegraph
+ hugegraph
+ ${revision}
+ ../pom.xml
+
+
+
+ 17
+ 17
+ UTF-8
+ 25.1-jre
+ 3.5.1
+
+
+
+
+ org.apache.hugegraph
+ hg-pd-client
+ ${project.version}
+
+
+
+ jakarta.ws.rs
+ jakarta.ws.rs-api
+ 3.0.0
+
+
+
+ org.apache.tinkerpop
+ gremlin-test
+ ${tinkerpop.version}
+
+
+
+ com.google.code.gson
+ gson
+ 2.8.9
+
+
+
+ org.apache.hugegraph
+ hugegraph-common
+ ${project.version}
+
+
+ org.glassfish.jersey.core
+ jersey-client
+
+
+
+
+ com.google.guava
+ guava
+ ${guava.version}
+
+
+
+
+
+
+
+ org.apache.tinkerpop
+ gremlin-shaded
+ 3.5.1
+
+
+ org.mindrot
+ jbcrypt
+ 0.4
+
+
+ org.eclipse.collections
+ eclipse-collections-api
+ 10.4.0
+
+
+ org.eclipse.collections
+ eclipse-collections
+ 10.4.0
+
+
+ it.unimi.dsi
+ fastutil
+ 8.1.0
+
+
+ org.lz4
+ lz4-java
+ 1.7.1
+
+
+ org.apache.commons
+ commons-text
+ 1.10.0
+
+
+
+ org.apdplat
+ word
+ 1.3
+
+
+ ch.qos.logback
+ logback-classic
+
+
+ slf4j-api
+ org.slf4j
+
+
+
+
+ org.ansj
+ ansj_seg
+ 5.1.6
+
+
+ com.hankcs
+ hanlp
+ portable-1.5.0
+
+
+ org.apache.lucene
+ lucene-analyzers-smartcn
+ 7.4.0
+
+
+ org.apache.lucene
+ lucene-core
+ 7.4.0
+
+
+ io.jsonwebtoken
+ jjwt-api
+ 0.11.2
+
+
+ io.jsonwebtoken
+ jjwt-impl
+ 0.11.2
+ runtime
+
+
+ io.jsonwebtoken
+ jjwt-jackson
+ 0.11.2
+ runtime
+
+
+ com.huaban
+ jieba-analysis
+ 1.0.2
+
+
+ org.lionsoul
+ jcseg-core
+ 2.2.0
+
+
+ com.chenlb.mmseg4j
+ mmseg4j-core
+ 1.10.0
+
+
+ com.janeluo
+ ikanalyzer
+ 2012_u6
+
+
+
+
+
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/HugeGraphSupplier.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/HugeGraphSupplier.java
new file mode 100644
index 0000000000..91c747676e
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/HugeGraphSupplier.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph;
+
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hugegraph.config.HugeConfig;
+import org.apache.hugegraph.util.DateUtil;
+
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.schema.EdgeLabel;
+import org.apache.hugegraph.schema.IndexLabel;
+import org.apache.hugegraph.schema.PropertyKey;
+import org.apache.hugegraph.schema.VertexLabel;
+
+/**
+ * Actually, it would be better if this interface were called
+ * "HugeGraphSchemaSupplier".
+ */
+public interface HugeGraphSupplier {
+
+ public List mapPkId2Name(Collection ids);
+
+ public List mapIlId2Name(Collection ids);
+
+ public PropertyKey propertyKey(Id key);
+
+ public Collection propertyKeys();
+
+ public VertexLabel vertexLabelOrNone(Id id);
+
+ public boolean existsLinkLabel(Id vertexLabel);
+
+ public VertexLabel vertexLabel(Id label);
+
+ public VertexLabel vertexLabel(String label);
+
+
+ public default EdgeLabel edgeLabelOrNone(Id id) {
+ EdgeLabel el = this.edgeLabel(id);
+ if (el == null) {
+ el = EdgeLabel.undefined(this, id);
+ }
+ return el;
+ }
+ public EdgeLabel edgeLabel(Id label);
+
+ public EdgeLabel edgeLabel(String label);
+
+ public IndexLabel indexLabel(Id id);
+
+ public Collection indexLabels();
+
+ public String name();
+
+ public HugeConfig configuration();
+
+ default long now() {
+ return DateUtil.now().getTime();
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaDriver.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaDriver.java
new file mode 100644
index 0000000000..9ce29c1b8b
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaDriver.java
@@ -0,0 +1,860 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.util.E;
+import org.apache.hugegraph.util.Log;
+import org.apache.tinkerpop.shaded.jackson.core.JsonProcessingException;
+import org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper;
+import org.slf4j.Logger;
+
+import org.apache.hugegraph.exception.HugeException;
+import org.apache.hugegraph.exception.NotAllowException;
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.pd.client.KvClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+import org.apache.hugegraph.schema.EdgeLabel;
+import org.apache.hugegraph.schema.IndexLabel;
+import org.apache.hugegraph.schema.PropertyKey;
+import org.apache.hugegraph.schema.SchemaElement;
+import org.apache.hugegraph.schema.VertexLabel;
+import org.apache.hugegraph.type.HugeType;
+
+public class SchemaDriver {
+ private static Logger log = Log.logger(SchemaDriver.class);
+ private static final ObjectMapper MAPPER = new ObjectMapper();
+
+ public static final String DELIMITER = "-";
+ public static final String META_PATH_DELIMITER = "/";
+ public static final String META_PATH_HUGEGRAPH = "HUGEGRAPH";
+ public static final String META_PATH_GRAPHSPACE = "GRAPHSPACE";
+ public static final String META_PATH_GRAPH = "GRAPH";
+ public static final String META_PATH_CLUSTER = "hg";
+ public static final String META_PATH_SCHEMA = "SCHEMA";
+ public static final String META_PATH_GRAPH_CONF = "GRAPH_CONF";
+ public static final String META_PATH_PROPERTY_KEY = "PROPERTY_KEY";
+ public static final String META_PATH_VERTEX_LABEL = "VERTEX_LABEL";
+ public static final String META_PATH_EDGE_LABEL = "EDGE_LABEL";
+ public static final String META_PATH_INDEX_LABEL = "INDEX_LABEL";
+ public static final String META_PATH_NAME = "NAME";
+ public static final String META_PATH_ID = "ID";
+ public static final String META_PATH_EVENT = "EVENT";
+ public static final String META_PATH_REMOVE = "REMOVE";
+ public static final String META_PATH_CLEAR = "CLEAR";
+
+ private static final AtomicReference INSTANCE =
+ new AtomicReference<>();
+ // Client used to access PD
+ private final KvClient client;
+
+ private SchemaCaches caches;
+
+ private SchemaDriver(PDConfig pdConfig, int cacheSize,
+ long expiration) {
+ this.client = new KvClient<>(pdConfig);
+ this.caches = new SchemaCaches(cacheSize, expiration);
+ this.listenMetaChanges();
+ log.info(String.format(
+ "The SchemaDriver initialized successfully, cacheSize = %s," +
+ " expiration = %s s", cacheSize, expiration / 1000));
+ }
+
+
+ public static void init(PDConfig pdConfig) {
+ init(pdConfig, 300, 300 * 1000);
+ }
+
+ public static void init(PDConfig pdConfig, int cacheSize, long expiration) {
+ SchemaDriver instance = INSTANCE.get();
+ if (instance != null) {
+ throw new NotAllowException(
+ "The SchemaDriver [cacheSize=%s, expiration=%s, " +
+ "client=%s] has already been initialized and is not " +
+ "allowed to be initialized again", instance.caches.limit(),
+ instance.caches.expiration(), instance.client);
+ }
+ INSTANCE.compareAndSet(null, new SchemaDriver(pdConfig, cacheSize,
+ expiration));
+ }
+
+ public static void destroy() {
+ SchemaDriver instance = INSTANCE.get();
+ if (instance != null) {
+ instance.caches.cancelScheduleCacheClean();
+ instance.caches.destroyAll();
+ INSTANCE.set(null);
+ }
+ }
+
+ public SchemaCaches schemaCaches() {
+ return this.caches;
+ }
+
+ public static SchemaDriver getInstance() {
+ return INSTANCE.get();
+ }
+
+ private void listenMetaChanges() {
+ this.listen(graphSpaceRemoveKey(), this::graphSpaceRemoveHandler);
+ this.listen(graphRemoveKey(), this::graphRemoveHandler);
+ this.listen(graphClearKey(), this::graphClearHandler);
+ this.listen(schemaCacheClearKey(), this::schemaCacheClearHandler);
+ }
+
+ private void schemaCacheClearHandler(T response) {
+ List names = this.extractValuesFromResponse(response);
+ for (String gs : names) {
+ String[] arr = gs.split(DELIMITER);
+ assert arr.length == 2;
+ this.caches.clear(arr[0], arr[1]);
+ log.info(String.format(
+ "Graph '%s' schema clear event is received, deleting all " +
+ "schema caches under '%s'", gs, gs));
+ }
+ }
+
+ private void graphClearHandler(T response) {
+ List names = this.extractValuesFromResponse(response);
+ for (String gs : names) {
+ String[] arr = gs.split(DELIMITER);
+ assert arr.length == 2;
+ this.caches.clear(arr[0], arr[1]);
+ log.info(String.format(
+ "Graph '%s' clear event is received, deleting all " +
+ "schema caches under '%s'", gs, gs));
+ }
+ }
+
+ private void graphRemoveHandler(T response) {
+ List names = this.extractValuesFromResponse(response);
+ for (String gs : names) {
+ String[] arr = gs.split(DELIMITER);
+ assert arr.length == 2;
+ this.caches.destroy(arr[0], arr[1]);
+ log.info(String.format(
+ "Graph '%s' delete event is received, deleting all " +
+ "schema caches under '%s'", gs, gs));
+ }
+ }
+
+ private void graphSpaceRemoveHandler(T response) {
+ List names = this.extractValuesFromResponse(response);
+ for (String gs : names) {
+ this.caches.destroy(gs);
+ log.info(String.format(
+ "graph space '%s' delete event is received, deleting all " +
+ "schema caches under '%s'", gs, gs));
+ }
+ }
+
+
+ public List extractValuesFromResponse(T response) {
+ List values = new ArrayList<>();
+ WatchResponse res = (WatchResponse) response;
+ for (WatchEvent event : res.getEventsList()) {
+ // Skip if not PUT event
+ if (!event.getType().equals(WatchType.Put)) {
+ // BUGFIX: was `return null;`, which aborted the whole scan and
+ // made every caller's for-each over the result throw an NPE.
+ continue;
+ }
+ values.add(event.getCurrent().getValue());
+ }
+ return values;
+
+
+ public void listen(String key, Consumer consumer) {
+ try {
+ this.client.listen(key, (Consumer) consumer);
+ } catch (PDException e) {
+ throw new HugeException("Failed to listen '%s' to pd", e, key);
+ }
+ }
+
+ public Map graphConfig(String graphSpace, String graph) {
+ String content = this.get(graphConfKey(graphSpace, graph));
+ if (content == null || content.length() == 0) {
+ return new HashMap<>();
+ } else {
+ return fromJson(content, Map.class);
+ }
+ }
+
+ public PropertyKey propertyKey(String graphSpace, String graph, Id id,
+ HugeGraphSupplier schemaGraph) {
+ SchemaElement pk =
+ this.caches.get(graphSpace, graph, HugeType.PROPERTY_KEY, id);
+ if (pk == null) {
+ pk = getPropertyKey(graphSpace, graph, id, schemaGraph);
+ E.checkArgument(pk != null, "no such propertyKey: id = '%s'", id);
+ this.caches.set(graphSpace, graph, HugeType.PROPERTY_KEY, pk.id(), pk);
+ this.caches.set(graphSpace, graph, HugeType.PROPERTY_KEY, pk.name(), pk);
+ }
+ return (PropertyKey) pk;
+ }
+
+ public PropertyKey propertyKey(String graphSpace, String graph,
+ String name, HugeGraphSupplier schemaGraph) {
+ SchemaElement pk =
+ this.caches.get(graphSpace, graph, HugeType.PROPERTY_KEY, name);
+ if (pk == null) {
+ pk = getPropertyKey(graphSpace, graph, name, schemaGraph);
+ E.checkArgument(pk != null, "no such propertyKey: name = '%s'",
+ name);
+ this.caches.set(graphSpace, graph, HugeType.PROPERTY_KEY, pk.id(), pk);
+ this.caches.set(graphSpace, graph, HugeType.PROPERTY_KEY, pk.name(), pk);
+ }
+ return (PropertyKey) pk;
+ }
+
+ public List propertyKeys(String graphSpace, String graph,
+ HugeGraphSupplier schemaGraph) {
+ Map propertyKeysKvs =
+ this.scanWithPrefix(propertyKeyPrefix(graphSpace, graph));
+ List propertyKeys =
+ new ArrayList<>(propertyKeysKvs.size());
+ for (String value : propertyKeysKvs.values()) {
+ PropertyKey pk =
+ PropertyKey.fromMap(fromJson(value, Map.class), schemaGraph);
+ this.caches.set(graphSpace, graph, HugeType.PROPERTY_KEY, pk.id(), pk);
+ this.caches.set(graphSpace, graph, HugeType.PROPERTY_KEY, pk.name(), pk);
+ propertyKeys.add(pk);
+ }
+ return propertyKeys;
+ }
+
+ public List vertexLabels(String graphSpace, String graph,
+ HugeGraphSupplier schemaGraph) {
+ Map vertexLabelKvs = this.scanWithPrefix(
+ vertexLabelPrefix(graphSpace, graph));
+ List vertexLabels =
+ new ArrayList<>(vertexLabelKvs.size());
+ for (String value : vertexLabelKvs.values()) {
+ VertexLabel vl =
+ VertexLabel.fromMap(fromJson(value, Map.class),
+ schemaGraph);
+ this.caches.set(graphSpace, graph, HugeType.VERTEX_LABEL, vl.id(), vl);
+ this.caches.set(graphSpace, graph, HugeType.VERTEX_LABEL, vl.name(), vl);
+ vertexLabels.add(vl);
+ }
+ return vertexLabels;
+ }
+
+ public List edgeLabels(String graphSpace, String graph,
+ HugeGraphSupplier schemaGraph) {
+ Map edgeLabelKvs = this.scanWithPrefix(
+ edgeLabelPrefix(graphSpace, graph));
+ List edgeLabels =
+ new ArrayList<>(edgeLabelKvs.size());
+ for (String value : edgeLabelKvs.values()) {
+ EdgeLabel el =
+ EdgeLabel.fromMap(fromJson(value, Map.class), schemaGraph);
+ this.caches.set(graphSpace, graph, HugeType.EDGE_LABEL, el.id(), el);
+ this.caches.set(graphSpace, graph, HugeType.EDGE_LABEL, el.name(), el);
+ edgeLabels.add(el);
+ }
+ return edgeLabels;
+ }
+
+ public List indexLabels(String graphSpace, String graph,
+ HugeGraphSupplier schemaGraph) {
+ Map indexLabelKvs = this.scanWithPrefix(
+ indexLabelPrefix(graphSpace, graph));
+ List indexLabels =
+ new ArrayList<>(indexLabelKvs.size());
+ for (String value : indexLabelKvs.values()) {
+ IndexLabel il =
+ IndexLabel.fromMap(fromJson(value, Map.class), schemaGraph);
+ this.caches.set(graphSpace, graph, HugeType.INDEX_LABEL, il.id(), il);
+ this.caches.set(graphSpace, graph, HugeType.INDEX_LABEL, il.name(), il);
+ indexLabels.add(il);
+ }
+ return indexLabels;
+ }
+
+ private String propertyKeyPrefix(String graphSpace, String graph) {
+ // HUGEGRAPH/{cluster}/GRAPHSPACE/{graphspace}/GRAPH/{graph
+ // }/SCHEMA/PROPERTY_KEY/NAME
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ graph,
+ META_PATH_SCHEMA,
+ META_PATH_PROPERTY_KEY,
+ META_PATH_NAME);
+ }
+
+ private String vertexLabelPrefix(String graphSpace, String graph) {
+ // HUGEGRAPH/{cluster}/GRAPHSPACE/{graphspace}/GRAPH/{graph
+ // }/SCHEMA/VERTEX_LABEL/NAME
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ graph,
+ META_PATH_SCHEMA,
+ META_PATH_VERTEX_LABEL,
+ META_PATH_NAME);
+ }
+
+ private String edgeLabelPrefix(String graphSpace, String graph) {
+ // HUGEGRAPH/{cluster}/GRAPHSPACE/{graphspace}/GRAPH/{graph
+ // }/SCHEMA/EDGELABEL/NAME
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ graph,
+ META_PATH_SCHEMA,
+ META_PATH_EDGE_LABEL,
+ META_PATH_NAME);
+ }
+
+ private String indexLabelPrefix(String graphSpace, String graph) {
+ // HUGEGRAPH/{cluster}/GRAPHSPACE/{graphspace}/GRAPH/{graph
+ // }/SCHEMA/INDEX_LABEL/NAME
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ graph,
+ META_PATH_SCHEMA,
+ META_PATH_INDEX_LABEL,
+ META_PATH_NAME);
+ }
+
+ public VertexLabel vertexLabel(String graphSpace, String graph, Id id,
+ HugeGraphSupplier schemaGraph) {
+ SchemaElement vl =
+ this.caches.get(graphSpace, graph, HugeType.VERTEX_LABEL, id);
+ if (vl == null) {
+ vl = getVertexLabel(graphSpace, graph, id, schemaGraph);
+ E.checkArgument(vl != null, "no such vertex label: id = '%s'", id);
+ this.caches.set(graphSpace, graph, HugeType.VERTEX_LABEL, vl.id(), vl);
+ this.caches.set(graphSpace, graph, HugeType.VERTEX_LABEL, vl.name(), vl);
+ }
+ return (VertexLabel) vl;
+ }
+
+ public VertexLabel vertexLabel(String graphSpace, String graph,
+ String name, HugeGraphSupplier schemaGraph) {
+ SchemaElement vl =
+ this.caches.get(graphSpace, graph, HugeType.VERTEX_LABEL, name);
+ if (vl == null) {
+ vl = getVertexLabel(graphSpace, graph, name, schemaGraph);
+ E.checkArgument(vl != null, "no such vertex label: name = '%s'",
+ name);
+ this.caches.set(graphSpace, graph, HugeType.VERTEX_LABEL, vl.id(), vl);
+ this.caches.set(graphSpace, graph, HugeType.VERTEX_LABEL, vl.name(), vl);
+ }
+ return (VertexLabel) vl;
+ }
+
+ public EdgeLabel edgeLabel(String graphSpace, String graph, Id id,
+ HugeGraphSupplier schemaGraph) {
+ SchemaElement el =
+ this.caches.get(graphSpace, graph, HugeType.EDGE_LABEL, id);
+ if (el == null) {
+ el = getEdgeLabel(graphSpace, graph, id, schemaGraph);
+ E.checkArgument(el != null, "no such edge label: id = '%s'", id);
+ this.caches.set(graphSpace, graph, HugeType.EDGE_LABEL, el.id(), el);
+ this.caches.set(graphSpace, graph, HugeType.EDGE_LABEL, el.name(), el);
+ }
+ return (EdgeLabel) el;
+ }
+
+ public EdgeLabel edgeLabel(String graphSpace, String graph, String name,
+ HugeGraphSupplier schemaGraph) {
+ SchemaElement el =
+ this.caches.get(graphSpace, graph, HugeType.EDGE_LABEL, name);
+ if (el == null) {
+ el = getEdgeLabel(graphSpace, graph, name, schemaGraph);
+ E.checkArgument(el != null, "no such edge label: name = '%s'",
+ name);
+ this.caches.set(graphSpace, graph, HugeType.EDGE_LABEL, el.id(), el);
+ this.caches.set(graphSpace, graph, HugeType.EDGE_LABEL, el.name(), el);
+ }
+ return (EdgeLabel) el;
+ }
+
+ public IndexLabel indexLabel(String graphSpace, String graph, Id id,
+ HugeGraphSupplier schemaGraph) {
+ SchemaElement il =
+ this.caches.get(graphSpace, graph, HugeType.INDEX_LABEL, id);
+ if (il == null) {
+ il = getIndexLabel(graphSpace, graph, id, schemaGraph);
+ E.checkArgument(il != null, "no such index label: id = '%s'", id);
+ this.caches.set(graphSpace, graph, HugeType.INDEX_LABEL, il.id(), il);
+ this.caches.set(graphSpace, graph, HugeType.INDEX_LABEL, il.name(), il);
+ }
+ return (IndexLabel) il;
+ }
+
+ public IndexLabel indexLabel(String graphSpace, String graph, String name,
+ HugeGraphSupplier schemaGraph) {
+ SchemaElement il =
+ this.caches.get(graphSpace, graph, HugeType.INDEX_LABEL, name);
+ if (il == null) {
+ il = getIndexLabel(graphSpace, graph, name, schemaGraph);
+ E.checkArgument(il != null, "no such index label: name = '%s'",
+ name);
+ this.caches.set(graphSpace, graph, HugeType.INDEX_LABEL, il.id(), il);
+ this.caches.set(graphSpace, graph, HugeType.INDEX_LABEL, il.name(), il);
+ }
+ return (IndexLabel) il;
+ }
+
+ private String get(String key) {
+ try {
+ KResponse response = this.client.get(key);
+ return response.getValue();
+ } catch (PDException e) {
+ throw new HugeException("Failed to get '%s' from pd", e, key);
+ }
+ }
+
+ private Map scanWithPrefix(String prefix) {
+ try {
+ ScanPrefixResponse response = this.client.scanPrefix(prefix);
+ return response.getKvsMap();
+ } catch (PDException e) {
+ throw new HugeException("Failed to scanWithPrefix '%s' from pd", e, prefix);
+ }
+ }
+
+ private PropertyKey getPropertyKey(String graphSpace, String graph,
+ Id propertyKey, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(propertyKeyIdKey(graphSpace, graph, propertyKey));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return PropertyKey.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+ private PropertyKey getPropertyKey(String graphSpace, String graph,
+ String propertyKey, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(propertyKeyNameKey(graphSpace, graph, propertyKey));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return PropertyKey.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+ private VertexLabel getVertexLabel(String graphSpace, String graph,
+ Id vertexLabel, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(vertexLabelIdKey(graphSpace, graph, vertexLabel));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return VertexLabel.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+ private VertexLabel getVertexLabel(String graphSpace, String graph,
+ String vertexLabel, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(vertexLabelNameKey(graphSpace, graph, vertexLabel));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return VertexLabel.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+ private EdgeLabel getEdgeLabel(String graphSpace, String graph,
+ Id edgeLabel, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(edgeLabelIdKey(graphSpace, graph, edgeLabel));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return EdgeLabel.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+ private EdgeLabel getEdgeLabel(String graphSpace, String graph,
+ String edgeLabel, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(edgeLabelNameKey(graphSpace, graph, edgeLabel));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return EdgeLabel.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+
+ private IndexLabel getIndexLabel(String graphSpace, String graph,
+ Id indexLabel, HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(indexLabelIdKey(graphSpace, graph, indexLabel));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return IndexLabel.fromMap(fromJson(content, Map.class), schemaGraph);
+ }
+ }
+
+ private IndexLabel getIndexLabel(String graphSpace, String graph,
+ String indexLabel,
+ HugeGraphSupplier schemaGraph) {
+ String content =
+ this.get(indexLabelNameKey(graphSpace, graph, indexLabel));
+ if (content == null || content.length() == 0) {
+ return null;
+ } else {
+ return IndexLabel.fromMap(fromJson(content, Map.class),
+ schemaGraph);
+ }
+ }
+
+
+ private T fromJson(String json, Class clazz) {
+ E.checkState(json != null, "Json value can't be null for '%s'",
+ clazz.getSimpleName());
+ try {
+ return MAPPER.readValue(json, clazz);
+ } catch (IOException e) {
+ throw new HugeException("Can't read json: %s", e, e.getMessage());
+ }
+ }
+
+ private String toJson(Object object) {
+ try {
+ return MAPPER.writeValueAsString(object);
+ } catch (JsonProcessingException e) {
+ throw new HugeException("Can't write json: %s", e, e.getMessage());
+ }
+ }
+
+ private String propertyKeyIdKey(String graphSpace, String graph, Id id) {
+ return idKey(graphSpace, graph, id, HugeType.PROPERTY_KEY);
+ }
+
+ private String propertyKeyNameKey(String graphSpace, String graph,
+ String name) {
+ return nameKey(graphSpace, graph, name, HugeType.PROPERTY_KEY);
+ }
+
+
+ private String vertexLabelIdKey(String graphSpace, String graph, Id id) {
+ return idKey(graphSpace, graph, id, HugeType.VERTEX_LABEL);
+ }
+
+ private String vertexLabelNameKey(String graphSpace, String graph,
+ String name) {
+ return nameKey(graphSpace, graph, name, HugeType.VERTEX_LABEL);
+ }
+
+ private String edgeLabelIdKey(String graphSpace, String graph, Id id) {
+ return idKey(graphSpace, graph, id, HugeType.EDGE_LABEL);
+ }
+
+ private String edgeLabelNameKey(String graphSpace, String graph,
+ String name) {
+ return nameKey(graphSpace, graph, name, HugeType.EDGE_LABEL);
+ }
+
+ private String indexLabelIdKey(String graphSpace, String graph, Id id) {
+ return idKey(graphSpace, graph, id, HugeType.INDEX_LABEL);
+ }
+
+ private String indexLabelNameKey(String graphSpace, String graph,
+ String name) {
+ return nameKey(graphSpace, graph, name, HugeType.INDEX_LABEL);
+ }
+
+ private String graphSpaceRemoveKey() {
+ // HUGEGRAPH/{cluster}/EVENT/GRAPHSPACE/REMOVE
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_EVENT,
+ META_PATH_GRAPHSPACE,
+ META_PATH_REMOVE);
+ }
+
+ private String graphConfKey(String graphSpace, String graph) {
+ // HUGEGRAPH/{cluster}/GRAPHSPACE/{graphspace}/GRAPH_CONF/{graph}
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ META_PATH_GRAPH_CONF,
+ graph);
+ }
+
+ private String nameKey(String graphSpace, String graph,
+ String name, HugeType type) {
+ // HUGEGRAPH/hg/GRAPHSPACE/{graphspace}/{graph}/SCHEMA
+ // /{META_PATH_TYPE}/NAME/{name}
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ graph,
+ META_PATH_SCHEMA,
+ hugeType2MetaPath(type),
+ META_PATH_NAME,
+ name);
+ }
+
+ private String idKey(String graphSpace, String graph,
+ Id id, HugeType type) {
+ // HUGEGRAPH/hg/GRAPHSPACE/{graphspace}/{graph}/SCHEMA
+ // /{META_PATH_TYPE}/ID/{id}
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_GRAPHSPACE,
+ graphSpace,
+ graph,
+ META_PATH_SCHEMA,
+ hugeType2MetaPath(type),
+ META_PATH_ID,
+ id.asString());
+ }
+
+ private String schemaCacheClearKey() {
+ // HUGEGRAPH/{cluster}/EVENT/GRAPH/SCHEMA/CLEAR
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_EVENT,
+ META_PATH_GRAPH,
+ META_PATH_SCHEMA,
+ META_PATH_CLEAR);
+ }
+
+ private String graphClearKey() {
+ // HUGEGRAPH/{cluster}/EVENT/GRAPH/CLEAR
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_EVENT,
+ META_PATH_GRAPH,
+ META_PATH_CLEAR);
+ }
+
+ private String graphRemoveKey() {
+ // HUGEGRAPH/{cluster}/EVENT/GRAPH/REMOVE
+ return stringJoin(META_PATH_DELIMITER,
+ META_PATH_HUGEGRAPH,
+ META_PATH_CLUSTER,
+ META_PATH_EVENT,
+ META_PATH_GRAPH,
+ META_PATH_REMOVE);
+ }
+
+ private String hugeType2MetaPath(HugeType type) {
+ String schemaType = null;
+ switch (type) {
+ case PROPERTY_KEY:
+ schemaType = META_PATH_PROPERTY_KEY;
+ break;
+ case VERTEX_LABEL:
+ schemaType = META_PATH_VERTEX_LABEL;
+ break;
+ case EDGE_LABEL:
+ schemaType = META_PATH_EDGE_LABEL;
+ break;
+ case INDEX_LABEL:
+ schemaType = META_PATH_INDEX_LABEL;
+ break;
+ default:
+ throw new AssertionError(String.format(
+ "Invalid HugeType : %s", type));
+ }
+ return schemaType;
+ }
+
+ private static String stringJoin(String delimiter, String... parts) {
+ StringBuilder builder = new StringBuilder();
+ int size = parts.length;
+ for (int i = 0; i < size; i++) {
+ builder.append(parts[i]);
+ if (i < size - 1) {
+ builder.append(delimiter);
+ }
+ }
+ return builder.toString();
+ }
+
+ private static final class SchemaCaches {
+ private final int limit;
+ private final long expiration;
+ private final Timer timer;
+
+ private ConcurrentHashMap> caches;
+
+ public SchemaCaches(int limit, long expiration) {
+ this.expiration = expiration;
+ this.limit = limit;
+ this.timer = new Timer();
+ this.caches = new ConcurrentHashMap<>();
+ scheduleCacheCleanup();
+ }
+
+ public int limit() {
+ return this.limit;
+ }
+
+ public long expiration() {
+ return this.expiration;
+ }
+
+ private void scheduleCacheCleanup() {
+ timer.scheduleAtFixedRate(new TimerTask() {
+ @Override
+ public void run() {
+ log.debug("schedule clear schema caches");
+ clearAll();
+ }
+ }, expiration, expiration);
+ }
+
+ public void cancelScheduleCacheClean() {
+ timer.cancel();
+ }
+
+ public SchemaElement get(String graphSpace, String graph, HugeType type,
+ Id id) {
+ return get(graphSpace, graph, type, id.asString());
+ }
+
+ public SchemaElement get(String graphSpace, String graph, HugeType type,
+ String name) {
+ String graphName = stringJoin(DELIMITER, graphSpace, graph);
+ if (this.caches.get(graphName) == null) {
+ this.caches.put(graphName, new ConcurrentHashMap<>(this.limit));
+ }
+ return this.caches.get(graphName)
+ .get(stringJoin(DELIMITER, type.string(), name));
+ }
+
+ public void set(String graphSpace, String graph, HugeType type, Id id,
+ SchemaElement value) {
+ set(graphSpace, graph, type, id.asString(), value);
+ }
+
+ public void set(String graphSpace, String graph, HugeType type,
+ String name, SchemaElement value) {
+ String graphName = stringJoin(DELIMITER, graphSpace, graph);
+ // BUGFIX: Map.put() returns the PREVIOUS mapping (null on first
+ // insert), so the old get/put sequence left schemaCaches null and
+ // the size() check below threw an NPE on every first cache miss.
+ ConcurrentHashMap
+ schemaCaches = this.caches.computeIfAbsent(
+ graphName, k -> new ConcurrentHashMap<>(this.limit));
+ if (schemaCaches.size() >= limit) {
+ log.info(String.format(
+ "The current '%s''s schemaCaches size '%s' reached " +
+ "limit '%s'", graphName, schemaCaches.size(), limit));
+ return;
+ }
+ schemaCaches.put(stringJoin(DELIMITER, type.string(), name),
+ value);
+ log.debug(String.format("graph '%s' add schema caches '%s'",
+ graphName,
+ stringJoin(DELIMITER, type.string(),
+ name)));
+
+ public void remove(String graphSpace, String graph, HugeType type,
+ Id id) {
+ remove(graphSpace, graph, type, id.asString());
+ }
+
+ public void remove(String graphSpace, String graph, HugeType type,
+ String name) {
+ String graphName = stringJoin(DELIMITER, graphSpace, graph);
+ ConcurrentHashMap
+ schemaCaches = this.caches.get(graphName);
+ if (schemaCaches != null) { // no cache for this graph yet: no-op (was NPE)
+ schemaCaches.remove(stringJoin(DELIMITER, type.string(), name));
+ }
+ }
+
+ public void clearAll() {
+ for (String key : this.caches.keySet()) {
+ log.debug(String.format("graph in '%s' schema caches clear",
+ key));
+ this.caches.get(key).clear();
+ }
+ }
+
+ public void clear(String graphSpace, String graph) {
+ ConcurrentHashMap
+ schemaCaches =
+ this.caches.get(stringJoin(DELIMITER, graphSpace, graph));
+ if (schemaCaches != null) {
+ schemaCaches.clear();
+ }
+ }
+
+ public void destroyAll() {
+ this.caches.clear();
+ }
+
+ public void destroy(String graphSpace, String graph) {
+ this.caches.remove(stringJoin(DELIMITER, graphSpace, graph));
+
+ }
+
+ public void destroy(String graphSpace) {
+ for (String key : this.caches.keySet()) {
+ String gs = key.split(DELIMITER)[0];
+ if (gs.equals(graphSpace)) {
+ this.caches.remove(key);
+ }
+ }
+ }
+
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaGraph.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaGraph.java
new file mode 100644
index 0000000000..f20c0d17fb
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/SchemaGraph.java
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph;
+
+import org.apache.hugegraph.HugeGraphSupplier;
+import org.apache.hugegraph.SchemaDriver;
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.schema.*;
+
+import org.apache.commons.configuration2.Configuration;
+import org.apache.commons.configuration2.MapConfiguration;
+import org.apache.hugegraph.config.HugeConfig;
+import org.apache.hugegraph.util.E;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+public class SchemaGraph implements HugeGraphSupplier {
+
+ private final String graphSpace;
+ private final String graph;
+ private final PDConfig pdConfig;
+ private HugeConfig config;
+
+ private final SchemaDriver schemaDriver;
+
+ public SchemaGraph(String graphSpace, String graph, PDConfig pdConfig) {
+ this.graphSpace = graphSpace;
+ this.graph = graph;
+ this.pdConfig = pdConfig;
+ this.schemaDriver = schemaDriverInit();
+ this.config = this.loadConfig();
+ }
+
+ private SchemaDriver schemaDriverInit() {
+ if (SchemaDriver.getInstance() == null) {
+ synchronized (SchemaDriver.class) {
+ if (SchemaDriver.getInstance() == null) {
+ SchemaDriver.init(this.pdConfig);
+ }
+ }
+ }
+ return SchemaDriver.getInstance();
+ }
+
+ private HugeConfig loadConfig() {
+ // 加载 PD 中的配置
+ Map<String, Object> configs =
+ schemaDriver.graphConfig(this.graphSpace, this.graph);
+ Configuration propConfig = new MapConfiguration(configs);
+ return new HugeConfig(propConfig);
+ }
+
+ @Override
+ public List<String> mapPkId2Name(Collection<Id> ids) {
+ List<String> names = new ArrayList<>(ids.size());
+ for (Id id : ids) {
+ SchemaElement schema = this.propertyKey(id);
+ names.add(schema.name());
+ }
+ return names;
+ }
+
+ @Override
+ public List<String> mapIlId2Name(Collection<Id> ids) {
+ List<String> names = new ArrayList<>(ids.size());
+ for (Id id : ids) {
+ SchemaElement schema = this.indexLabel(id);
+ names.add(schema.name());
+ }
+ return names;
+ }
+
+ @Override
+ public HugeConfig configuration(){
+ return this.config;
+ }
+
+ @Override
+ public PropertyKey propertyKey(Id id) {
+ return schemaDriver.propertyKey(this.graphSpace, this.graph, id, this);
+ }
+
+ public PropertyKey propertyKey(String name) {
+ return schemaDriver.propertyKey(this.graphSpace, this.graph, name, this);
+ }
+
+ @Override
+ public Collection<PropertyKey> propertyKeys() {
+ // TODO
+ return null;
+ }
+
+ @Override
+ public VertexLabel vertexLabelOrNone(Id id) {
+ VertexLabel vl = vertexLabel(id);
+ if (vl == null) {
+ vl = VertexLabel.undefined(null, id);
+ }
+ return vl;
+ }
+
+ @Override
+ public boolean existsLinkLabel(Id vertexLabel) {
+ List<EdgeLabel> edgeLabels =
+ schemaDriver.edgeLabels(this.graphSpace, this.graph, this);
+ for (EdgeLabel edgeLabel : edgeLabels) {
+ if (edgeLabel.linkWithLabel(vertexLabel)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public VertexLabel vertexLabel(Id id) {
+ E.checkArgumentNotNull(id, "Vertex label id can't be null");
+ if (SchemaElement.OLAP_ID.equals(id)) {
+ return VertexLabel.OLAP_VL;
+ }
+ return schemaDriver.vertexLabel(this.graphSpace, this.graph, id, this);
+ }
+
+ @Override
+ public VertexLabel vertexLabel(String name) {
+ E.checkArgumentNotNull(name, "Vertex label name can't be null");
+ E.checkArgument(!name.isEmpty(), "Vertex label name can't be empty");
+ if (SchemaElement.OLAP.equals(name)) {
+ return VertexLabel.OLAP_VL;
+ }
+ return schemaDriver.vertexLabel(this.graphSpace, this.graph, name, this);
+ }
+
+ @Override
+ public EdgeLabel edgeLabel(Id id) {
+ return schemaDriver.edgeLabel(this.graphSpace, this.graph, id, this);
+ }
+
+ @Override
+ public EdgeLabel edgeLabel(String name) {
+ return schemaDriver.edgeLabel(this.graphSpace, this.graph, name, this);
+ }
+
+ @Override
+ public IndexLabel indexLabel(Id id) {
+ return schemaDriver.indexLabel(this.graphSpace, this.graph, id, this);
+ }
+
+ @Override
+ public Collection<IndexLabel> indexLabels() {
+ return schemaDriver.indexLabels(this.graphSpace, this.graph, this);
+ }
+
+ public IndexLabel indexLabel(String name) {
+ return schemaDriver.indexLabel(this.graphSpace, this.graph, name, this);
+ }
+
+ @Override
+ public String name() {
+ return String.join("-", this.graphSpace, this.graph);
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/Analyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/Analyzer.java
new file mode 100644
index 0000000000..4edd2ffa9b
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/Analyzer.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.util.Set;
+
+public interface Analyzer {
+
+ public Set<String> segment(String text);
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnalyzerFactory.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnalyzerFactory.java
new file mode 100644
index 0000000000..bff18ab7b0
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnalyzerFactory.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+
+import org.apache.hugegraph.exception.HugeException;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class AnalyzerFactory {
+
+ private static Map<String, Class<? extends Analyzer>> analyzers;
+
+ static {
+ analyzers = new ConcurrentHashMap<>();
+ }
+
+ public static Analyzer analyzer(String name, String mode) {
+ name = name.toLowerCase();
+ switch (name) {
+ case "word":
+ return new WordAnalyzer(mode);
+ case "ansj":
+ return new AnsjAnalyzer(mode);
+ case "hanlp":
+ return new HanLPAnalyzer(mode);
+ case "smartcn":
+ return new SmartCNAnalyzer(mode);
+ case "jieba":
+ return new JiebaAnalyzer(mode);
+ case "jcseg":
+ return new JcsegAnalyzer(mode);
+ case "mmseg4j":
+ return new MMSeg4JAnalyzer(mode);
+ case "ikanalyzer":
+ return new IKAnalyzer(mode);
+ default:
+ return customizedAnalyzer(name, mode);
+ }
+ }
+
+ private static Analyzer customizedAnalyzer(String name, String mode) {
+ Class<? extends Analyzer> clazz = analyzers.get(name);
+ if (clazz == null) {
+ throw new HugeException("Not exists analyzer: %s", name);
+ }
+
+ assert Analyzer.class.isAssignableFrom(clazz);
+ try {
+ return clazz.getConstructor(String.class).newInstance(mode);
+ } catch (Exception e) {
+ throw new HugeException(
+ "Failed to construct analyzer '%s' with mode '%s'",
+ e, name, mode);
+ }
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public static void register(String name, String classPath) {
+ ClassLoader classLoader = AnalyzerFactory.class.getClassLoader();
+ Class<?> clazz;
+ try {
+ clazz = classLoader.loadClass(classPath);
+ } catch (Exception e) {
+ throw new HugeException("Load class path '%s' failed",
+ e, classPath);
+ }
+
+ // Check subclass
+ if (!Analyzer.class.isAssignableFrom(clazz)) {
+ throw new HugeException("Class '%s' is not a subclass of " +
+ "class Analyzer", classPath);
+ }
+
+ // Check exists
+ if (analyzers.containsKey(name)) {
+ throw new HugeException("Exists analyzer: %s(%s)",
+ name, analyzers.get(name).getName());
+ }
+
+ // Register class
+ analyzers.put(name, (Class) clazz);
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnsjAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnsjAnalyzer.java
new file mode 100644
index 0000000000..3f041d31f8
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/AnsjAnalyzer.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.util.List;
+import java.util.Set;
+
+import org.ansj.domain.Result;
+import org.ansj.domain.Term;
+import org.ansj.splitWord.analysis.BaseAnalysis;
+import org.ansj.splitWord.analysis.IndexAnalysis;
+import org.ansj.splitWord.analysis.NlpAnalysis;
+import org.ansj.splitWord.analysis.ToAnalysis;
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class AnsjAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES = ImmutableList.of(
+ "BaseAnalysis",
+ "IndexAnalysis",
+ "ToAnalysis",
+ "NlpAnalysis"
+ );
+
+ private String analysis;
+
+ public AnsjAnalyzer(String mode) {
+ if (!SUPPORT_MODES.contains(mode)) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for ansj analyzer, " +
+ "the available values are %s", mode, SUPPORT_MODES);
+ }
+ this.analysis = mode;
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Result terms = null;
+ switch (this.analysis) {
+ case "BaseAnalysis":
+ terms = BaseAnalysis.parse(text);
+ break;
+ case "ToAnalysis":
+ terms = ToAnalysis.parse(text);
+ break;
+ case "NlpAnalysis":
+ terms = NlpAnalysis.parse(text);
+ break;
+ case "IndexAnalysis":
+ terms = IndexAnalysis.parse(text);
+ break;
+ default:
+ throw new AssertionError(String.format(
+ "Unsupported segment mode '%s'", this.analysis));
+ }
+
+ assert terms != null;
+ Set<String> result = InsertionOrderUtil.newSet();
+ for (Term term : terms) {
+ result.add(term.getName());
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/HanLPAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/HanLPAnalyzer.java
new file mode 100644
index 0000000000..b8175e400c
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/HanLPAnalyzer.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.util.List;
+import java.util.Set;
+
+
+import com.google.common.collect.ImmutableList;
+import com.hankcs.hanlp.seg.Dijkstra.DijkstraSegment;
+import com.hankcs.hanlp.seg.NShort.NShortSegment;
+import com.hankcs.hanlp.seg.Segment;
+import com.hankcs.hanlp.seg.common.Term;
+import com.hankcs.hanlp.tokenizer.IndexTokenizer;
+import com.hankcs.hanlp.tokenizer.NLPTokenizer;
+import com.hankcs.hanlp.tokenizer.SpeedTokenizer;
+import com.hankcs.hanlp.tokenizer.StandardTokenizer;
+
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class HanLPAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES =
+ ImmutableList.<String>builder()
+ .add("standard")
+ .add("nlp")
+ .add("index")
+ .add("nShort")
+ .add("shortest")
+ .add("speed")
+ .build();
+
+ private static final Segment N_SHORT_SEGMENT =
+ new NShortSegment().enableCustomDictionary(false)
+ .enablePlaceRecognize(true)
+ .enableOrganizationRecognize(true);
+ private static final Segment DIJKSTRA_SEGMENT =
+ new DijkstraSegment().enableCustomDictionary(false)
+ .enablePlaceRecognize(true)
+ .enableOrganizationRecognize(true);
+
+ private String tokenizer;
+
+ public HanLPAnalyzer(String mode) {
+ if (!SUPPORT_MODES.contains(mode)) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for hanlp analyzer, " +
+ "the available values are %s", mode, SUPPORT_MODES);
+ }
+ this.tokenizer = mode;
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ List<Term> terms = null;
+ switch (this.tokenizer) {
+ case "standard":
+ terms = StandardTokenizer.segment(text);
+ break;
+ case "nlp":
+ terms = NLPTokenizer.segment(text);
+ break;
+ case "index":
+ terms = IndexTokenizer.segment(text);
+ break;
+ case "nShort":
+ terms = N_SHORT_SEGMENT.seg(text);
+ break;
+ case "shortest":
+ terms = DIJKSTRA_SEGMENT.seg(text);
+ break;
+ case "speed":
+ terms = SpeedTokenizer.segment(text);
+ break;
+ default:
+ throw new AssertionError(String.format(
+ "Unsupported segment mode '%s'", this.tokenizer));
+ }
+
+ assert terms != null;
+ Set<String> result = InsertionOrderUtil.newSet();
+ for (Term term : terms) {
+ result.add(term.word);
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/IKAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/IKAnalyzer.java
new file mode 100644
index 0000000000..a938e8e01f
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/IKAnalyzer.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import com.google.common.collect.ImmutableList;
+
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.exception.HugeException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+import org.wltea.analyzer.core.IKSegmenter;
+import org.wltea.analyzer.core.Lexeme;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class IKAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES = ImmutableList.of(
+ "smart",
+ "max_word"
+ );
+
+ private boolean smartSegMode;
+ private final IKSegmenter ik;
+
+ public IKAnalyzer(String mode) {
+ if (!SUPPORT_MODES.contains(mode)) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for ikanalyzer, " +
+ "the available values are %s", mode, SUPPORT_MODES);
+ }
+ this.smartSegMode = SUPPORT_MODES.get(0).equals(mode);
+ this.ik = new IKSegmenter(new StringReader(""),
+ this.smartSegMode);
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Set<String> result = InsertionOrderUtil.newSet();
+ ik.reset(new StringReader(text));
+ try {
+ Lexeme word = null;
+ while ((word = ik.next()) != null) {
+ result.add(word.getLexemeText());
+ }
+ } catch (Exception e) {
+ throw new HugeException("IKAnalyzer segment text '%s' failed",
+ e, text);
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JcsegAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JcsegAnalyzer.java
new file mode 100644
index 0000000000..0a69af8384
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JcsegAnalyzer.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.exception.HugeException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+import org.lionsoul.jcseg.tokenizer.core.ADictionary;
+import org.lionsoul.jcseg.tokenizer.core.DictionaryFactory;
+import org.lionsoul.jcseg.tokenizer.core.ISegment;
+import org.lionsoul.jcseg.tokenizer.core.IWord;
+import org.lionsoul.jcseg.tokenizer.core.JcsegTaskConfig;
+import org.lionsoul.jcseg.tokenizer.core.SegmentFactory;
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class JcsegAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES = ImmutableList.of(
+ "Simple",
+ "Complex"
+ );
+
+ private static final JcsegTaskConfig CONFIG = new JcsegTaskConfig();
+ private static final ADictionary DIC =
+ DictionaryFactory.createDefaultDictionary(new JcsegTaskConfig());
+
+ private int segMode;
+
+ public JcsegAnalyzer(String mode) {
+ if (!SUPPORT_MODES.contains(mode)) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for jcseg analyzer, " +
+ "the available values are %s", mode, SUPPORT_MODES);
+ }
+ this.segMode = SUPPORT_MODES.indexOf(mode) + 1;
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Set<String> result = InsertionOrderUtil.newSet();
+ try {
+ Object[] args = new Object[]{new StringReader(text), CONFIG, DIC};
+ ISegment seg = SegmentFactory.createJcseg(this.segMode, args);
+ IWord word = null;
+ while ((word = seg.next()) != null) {
+ result.add(word.getValue());
+ }
+ } catch (Exception e) {
+ throw new HugeException("Jcseg segment text '%s' failed", e, text);
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JiebaAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JiebaAnalyzer.java
new file mode 100644
index 0000000000..70cae33268
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/JiebaAnalyzer.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+
+import com.google.common.collect.ImmutableList;
+import com.huaban.analysis.jieba.JiebaSegmenter;
+import com.huaban.analysis.jieba.SegToken;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class JiebaAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES = ImmutableList.of(
+ "SEARCH",
+ "INDEX"
+ );
+
+ private static final JiebaSegmenter JIEBA_SEGMENTER = new JiebaSegmenter();
+
+ private JiebaSegmenter.SegMode segMode;
+
+ public JiebaAnalyzer(String mode) {
+ if (!SUPPORT_MODES.contains(mode)) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for jieba analyzer, " +
+ "the available values are %s", mode, SUPPORT_MODES);
+ }
+ this.segMode = JiebaSegmenter.SegMode.valueOf(mode);
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Set<String> result = InsertionOrderUtil.newSet();
+ for (SegToken token : JIEBA_SEGMENTER.process(text, this.segMode)) {
+ result.add(token.word);
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/MMSeg4JAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/MMSeg4JAnalyzer.java
new file mode 100644
index 0000000000..3316582f73
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/MMSeg4JAnalyzer.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+
+import org.apache.hugegraph.exception.HugeException;
+import com.chenlb.mmseg4j.ComplexSeg;
+import com.chenlb.mmseg4j.Dictionary;
+import com.chenlb.mmseg4j.MMSeg;
+import com.chenlb.mmseg4j.MaxWordSeg;
+import com.chenlb.mmseg4j.Seg;
+import com.chenlb.mmseg4j.SimpleSeg;
+import com.chenlb.mmseg4j.Word;
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class MMSeg4JAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES = ImmutableList.of(
+ "Simple",
+ "Complex",
+ "MaxWord"
+ );
+
+ private static final Dictionary DIC = Dictionary.getInstance();
+
+ private Seg seg;
+
+ public MMSeg4JAnalyzer(String mode) {
+ if (!SUPPORT_MODES.contains(mode)) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for mmseg4j analyzer, " +
+ "the available values are %s", mode, SUPPORT_MODES);
+ }
+ int index = SUPPORT_MODES.indexOf(mode);
+ switch (index) {
+ case 0:
+ this.seg = new SimpleSeg(DIC);
+ break;
+ case 1:
+ this.seg = new ComplexSeg(DIC);
+ break;
+ case 2:
+ this.seg = new MaxWordSeg(DIC);
+ break;
+ default:
+ throw new AssertionError(String.format(
+ "Unsupported segment mode '%s'", this.seg));
+ }
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Set<String> result = InsertionOrderUtil.newSet();
+ MMSeg mmSeg = new MMSeg(new StringReader(text), this.seg);
+ try {
+ Word word = null;
+ while ((word = mmSeg.next()) != null) {
+ result.add(word.getString());
+ }
+ } catch (Exception e) {
+ throw new HugeException("MMSeg4j segment text '%s' failed",
+ e, text);
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/SmartCNAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/SmartCNAnalyzer.java
new file mode 100644
index 0000000000..34c0ea2fba
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/SmartCNAnalyzer.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.io.Reader;
+import java.io.StringReader;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hugegraph.util.InsertionOrderUtil;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+
+import org.apache.hugegraph.exception.HugeException;
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class SmartCNAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES = ImmutableList.of();
+
+ private static final SmartChineseAnalyzer ANALYZER =
+ new SmartChineseAnalyzer();
+
+ public SmartCNAnalyzer(String mode) {
+ // pass
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Set<String> result = InsertionOrderUtil.newSet();
+ Reader reader = new StringReader(text);
+ try (TokenStream tokenStream = ANALYZER.tokenStream("text", reader)) {
+ tokenStream.reset();
+ CharTermAttribute term = null;
+ while (tokenStream.incrementToken()) {
+ term = tokenStream.getAttribute(CharTermAttribute.class);
+ result.add(term.toString());
+ }
+ } catch (Exception e) {
+ throw new HugeException("SmartCN segment text '%s' failed",
+ e, text);
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/WordAnalyzer.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/WordAnalyzer.java
new file mode 100644
index 0000000000..0a7ebd07fc
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/analyzer/WordAnalyzer.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.analyzer;
+
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hugegraph.config.ConfigException;
+import org.apache.hugegraph.util.InsertionOrderUtil;
+import org.apdplat.word.WordSegmenter;
+import org.apdplat.word.segmentation.SegmentationAlgorithm;
+import org.apdplat.word.segmentation.Word;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Reference from https://my.oschina.net/apdplat/blog/412921
+ */
+public class WordAnalyzer implements Analyzer {
+
+ public static final List<String> SUPPORT_MODES =
+ ImmutableList.<String>builder()
+ .add("MaximumMatching")
+ .add("ReverseMaximumMatching")
+ .add("MinimumMatching")
+ .add("ReverseMinimumMatching")
+ .add("BidirectionalMaximumMatching")
+ .add("BidirectionalMinimumMatching")
+ .add("BidirectionalMaximumMinimumMatching")
+ .add("FullSegmentation")
+ .add("MinimalWordCount")
+ .add("MaxNgramScore")
+ .add("PureEnglish")
+ .build();
+
+ private SegmentationAlgorithm algorithm;
+
+ public WordAnalyzer(String mode) {
+ try {
+ this.algorithm = SegmentationAlgorithm.valueOf(mode);
+ } catch (Exception e) {
+ throw new ConfigException(
+ "Unsupported segment mode '%s' for word analyzer, " +
+ "the available values are %s", e, mode, SUPPORT_MODES);
+ }
+ }
+
+ @Override
+ public Set<String> segment(String text) {
+ Set<String> result = InsertionOrderUtil.newSet();
+ List<Word> words = WordSegmenter.segWithStopWords(text, this.algorithm);
+ for (Word word : words) {
+ result.add(word.getText());
+ }
+ return result;
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/auth/AuthConstant.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/auth/AuthConstant.java
new file mode 100644
index 0000000000..97bd1a0e1c
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/auth/AuthConstant.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.auth;
+
+/**
+ * Names of the claim fields stored in an auth token payload.
+ */
+public interface AuthConstant {
+
+    /*
+     * Fields in token
+     */
+    String TOKEN_USER_NAME = "user_name";
+    String TOKEN_USER_ID = "user_id";
+    String TOKEN_USER_PASSWORD = "user_password";
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/auth/TokenGenerator.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/auth/TokenGenerator.java
new file mode 100644
index 0000000000..f803894fc2
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/auth/TokenGenerator.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.auth;
+
+import org.apache.hugegraph.options.AuthOptions;
+
+import io.jsonwebtoken.*;
+import io.jsonwebtoken.security.Keys;
+import jakarta.ws.rs.NotAuthorizedException;
+
+import org.apache.hugegraph.config.HugeConfig;
+
+import javax.crypto.SecretKey;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Date;
+import java.util.Map;
+
+/**
+ * Creates and verifies JWT tokens signed with an HMAC-SHA256 secret key.
+ */
+public class TokenGenerator {
+
+    private final SecretKey key;
+
+    public TokenGenerator(HugeConfig config) {
+        String secretKey = config.get(AuthOptions.AUTH_TOKEN_SECRET);
+        this.key = Keys.hmacShaKeyFor(secretKey.getBytes(StandardCharsets.UTF_8));
+    }
+
+    public TokenGenerator(String secretKey) {
+        this.key = Keys.hmacShaKeyFor(secretKey.getBytes(StandardCharsets.UTF_8));
+    }
+
+    /**
+     * Create a signed token carrying the given claims.
+     *
+     * @param payload claims to embed in the token body
+     * @param expire  time-to-live in milliseconds from now
+     * @return the compact serialized JWS string
+     */
+    public String create(Map<String, ?> payload, long expire) {
+        return Jwts.builder()
+                   .setClaims(payload)
+                   .setExpiration(new Date(System.currentTimeMillis() + expire))
+                   .signWith(this.key, SignatureAlgorithm.HS256)
+                   .compact();
+    }
+
+    /**
+     * Parse and validate a signed token, returning its claims.
+     *
+     * @throws NotAuthorizedException if the token is expired or invalid
+     */
+    public Claims verify(String token) {
+        try {
+            // Jws<Claims> is required: raw Jws.getBody() would return Object
+            Jws<Claims> claimsJws = Jwts.parserBuilder()
+                                        .setSigningKey(this.key)
+                                        .build()
+                                        .parseClaimsJws(token);
+            return claimsJws.getBody();
+        } catch (ExpiredJwtException e) {
+            throw new NotAuthorizedException("The token is expired", e);
+        } catch (JwtException e) {
+            throw new NotAuthorizedException("Invalid token", e);
+        }
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BackendColumn.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BackendColumn.java
new file mode 100644
index 0000000000..342f3ff60e
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BackendColumn.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend;
+
+import java.util.Arrays;
+
+import org.apache.hugegraph.util.Bytes;
+
+import org.apache.hugegraph.util.StringEncoding;
+
+/**
+ * A backend key-value pair (column name and value as raw bytes).
+ * Ordering and comparison are byte-wise via Bytes utilities.
+ */
+public class BackendColumn implements Comparable<BackendColumn> {
+
+    public byte[] name;
+    public byte[] value;
+
+    public static BackendColumn of(byte[] name, byte[] value) {
+        BackendColumn col = new BackendColumn();
+        col.name = name;
+        col.value = value;
+        return col;
+    }
+
+    @Override
+    public String toString() {
+        return String.format("%s=%s",
+                             StringEncoding.decode(name),
+                             StringEncoding.decode(value));
+    }
+
+    @Override
+    public int compareTo(BackendColumn other) {
+        // Any column sorts after null; otherwise order by name bytes only
+        if (other == null) {
+            return 1;
+        }
+        return Bytes.compare(this.name, other.name);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof BackendColumn)) {
+            return false;
+        }
+        BackendColumn other = (BackendColumn) obj;
+        return Bytes.equals(this.name, other.name) &&
+               Bytes.equals(this.value, other.value);
+    }
+
+    @Override
+    public int hashCode() {
+        // XOR combines both array hashes; '|' would bias bits towards 1
+        // and degrade hash distribution (still consistent with equals)
+        return Arrays.hashCode(this.name) ^ Arrays.hashCode(this.value);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BinaryId.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BinaryId.java
new file mode 100644
index 0000000000..685a934fd7
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/BinaryId.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend;
+
+import org.apache.hugegraph.id.Id;
+
+import org.apache.hugegraph.util.Bytes;
+import org.apache.hugegraph.util.E;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Id wrapper holding the backend-serialized bytes of an id together with
+ * the original Id it was encoded from.
+ */
+public final class BinaryId implements Id {
+
+    private final byte[] bytes;
+    private final Id id;
+
+    public BinaryId(byte[] bytes, Id id) {
+        this.bytes = bytes;
+        this.id = id;
+    }
+
+    /** The original id these bytes were encoded from. */
+    public Id origin() {
+        return this.id;
+    }
+
+    @Override
+    public IdType type() {
+        // The wrapped id's concrete type is not tracked at this level
+        return IdType.UNKNOWN;
+    }
+
+    @Override
+    public Object asObject() {
+        return ByteBuffer.wrap(this.bytes);
+    }
+
+    @Override
+    public String asString() {
+        // Not meaningful for raw serialized bytes
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long asLong() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int compareTo(Id other) {
+        // Byte-wise comparison against the other id's serialized form
+        return Bytes.compare(this.bytes, other.asBytes());
+    }
+
+    @Override
+    public byte[] asBytes() {
+        return this.bytes;
+    }
+
+    /** Copy of the serialized bytes from {@code offset} to the end. */
+    public byte[] asBytes(int offset) {
+        E.checkArgument(offset < this.bytes.length,
+                        "Invalid offset %s, must be < length %s",
+                        offset, this.bytes.length);
+        return Arrays.copyOfRange(this.bytes, offset, this.bytes.length);
+    }
+
+    @Override
+    public int length() {
+        return this.bytes.length;
+    }
+
+    @Override
+    public int hashCode() {
+        // ByteBuffer.hashCode() is content-based, so this stays consistent
+        // with equals(), which compares the byte arrays
+        return ByteBuffer.wrap(this.bytes).hashCode();
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (!(other instanceof BinaryId)) {
+            return false;
+        }
+        return Arrays.equals(this.bytes, ((BinaryId) other).bytes);
+    }
+
+    @Override
+    public String toString() {
+        return "0x" + Bytes.toHex(this.bytes);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/Shard.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/Shard.java
new file mode 100644
index 0000000000..7d69166c63
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/backend/Shard.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.backend;
+
+/**
+ * Shard is used for backend storage (like cassandra, hbase) scanning
+ * operations. Each shard represents a range of tokens for a node.
+ * Reading data from a given shard does not cross multiple nodes.
+ */
+public class Shard {
+
+    // token range start
+    private String start;
+    // token range end
+    private String end;
+    // partitions count in this range
+    private long length;
+
+    public Shard(String start, String end, long length) {
+        this.start = start;
+        this.end = end;
+        this.length = length;
+    }
+
+    // Accessors use the fluent name-as-getter / name-as-setter convention
+
+    public String start() {
+        return this.start;
+    }
+
+    public void start(String start) {
+        this.start = start;
+    }
+
+    public String end() {
+        return this.end;
+    }
+
+    public void end(String end) {
+        this.end = end;
+    }
+
+    public long length() {
+        return this.length;
+    }
+
+    public void length(long length) {
+        this.length = length;
+    }
+
+    @Override
+    public String toString() {
+        return String.format("Shard{start=%s, end=%s, length=%s}",
+                             this.start, this.end, this.length);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/BackendException.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/BackendException.java
new file mode 100644
index 0000000000..3fffd5ea10
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/BackendException.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Exception raised for errors originating from backend storage.
+ */
+public class BackendException extends HugeException {
+
+    private static final long serialVersionUID = -1947589125372576298L;
+
+    public BackendException(String message) {
+        super(message);
+    }
+
+    public BackendException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    // The varargs constructors format message with String.format semantics
+    // (see HugeException), so 'message' is a format template here
+    public BackendException(String message, Object... args) {
+        super(message, args);
+    }
+
+    public BackendException(String message, Throwable cause, Object... args) {
+        super(message, cause, args);
+    }
+
+    public BackendException(Throwable cause) {
+        this("Exception in backend", cause);
+    }
+
+    /** Throw a BackendException with the formatted message if expression is false. */
+    public static final void check(boolean expression,
+                                   String message, Object... args)
+                                   throws BackendException {
+        if (!expression) {
+            throw new BackendException(message, args);
+        }
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/ErrorCodeProvider.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/ErrorCodeProvider.java
new file mode 100644
index 0000000000..d5034b703a
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/ErrorCodeProvider.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Supplies error-code aware message building for exceptions
+ * (used by the HugeException constructors).
+ */
+public interface ErrorCodeProvider {
+
+    /** Build a message from this code using the given format args. */
+    public String format(Object... args);
+
+    /** Combine this code with the given message. */
+    public String with(String message);
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/HugeException.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/HugeException.java
new file mode 100644
index 0000000000..b7d8a45882
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/HugeException.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Base runtime exception of HugeGraph; all project exceptions extend it.
+ */
+public class HugeException extends RuntimeException {
+
+    private static final long serialVersionUID = -8711375282196157058L;
+
+    public HugeException(String message) {
+        super(message);
+    }
+
+    public HugeException(ErrorCodeProvider code, String message) {
+        super(code.with(message));
+    }
+
+    public HugeException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public HugeException(ErrorCodeProvider code, String message, Throwable cause) {
+        super(code.with(message), cause);
+    }
+
+    // NOTE: 'message' is a String.format template in the varargs variants,
+    // so literal '%' characters in it must be escaped as '%%'
+    public HugeException(String message, Object... args) {
+        super(String.format(message, args));
+    }
+
+    public HugeException(ErrorCodeProvider code, Object... args) {
+        super(code.format(args));
+    }
+
+    public HugeException(String message, Throwable cause, Object... args) {
+        super(String.format(message, args), cause);
+    }
+
+    public HugeException(ErrorCodeProvider code, Throwable cause, Object... args) {
+        super(code.format(args), cause);
+    }
+
+    /** The deepest cause of this exception (this, if there is no cause). */
+    public Throwable rootCause() {
+        return rootCause(this);
+    }
+
+    /** Walk the cause chain of {@code e} and return its deepest cause. */
+    public static Throwable rootCause(Throwable e) {
+        Throwable cause = e;
+        while (cause.getCause() != null) {
+            cause = cause.getCause();
+        }
+        return cause;
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/LimitExceedException.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/LimitExceedException.java
new file mode 100644
index 0000000000..10652dca2c
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/LimitExceedException.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Raised when a configured or requested limit is exceeded.
+ */
+public class LimitExceedException extends HugeException {
+
+    private static final long serialVersionUID = 7384276720045597709L;
+
+    public LimitExceedException(String message) {
+        super(message);
+    }
+
+    // 'message' is a String.format template (see HugeException)
+    public LimitExceedException(String message, Object... args) {
+        super(message, args);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotAllowException.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotAllowException.java
new file mode 100644
index 0000000000..3781b6d482
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotAllowException.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Raised when an operation is not allowed in the current state.
+ */
+public class NotAllowException extends HugeException {
+
+    private static final long serialVersionUID = -1407924451828873200L;
+
+    public NotAllowException(String message) {
+        super(message);
+    }
+
+    // 'message' is a String.format template (see HugeException)
+    public NotAllowException(String message, Object... args) {
+        super(message, args);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotFoundException.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotFoundException.java
new file mode 100644
index 0000000000..8567ceb018
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotFoundException.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Raised when a requested element (vertex, edge, schema, ...) is absent.
+ */
+public class NotFoundException extends HugeException {
+
+    private static final long serialVersionUID = -5912665926327173032L;
+
+    public NotFoundException(String message) {
+        super(message);
+    }
+
+    // 'message' is a String.format template (see HugeException)
+    public NotFoundException(String message, Object... args) {
+        super(message, args);
+    }
+
+    public NotFoundException(String message, Throwable cause, Object... args) {
+        super(message, cause, args);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotSupportException.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotSupportException.java
new file mode 100644
index 0000000000..49d3dad49c
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/exception/NotSupportException.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.exception;
+
+/**
+ * Raised for unsupported operations; the message is automatically
+ * prefixed with "Not support ".
+ */
+public class NotSupportException extends HugeException {
+
+    private static final long serialVersionUID = -2914329541122906234L;
+    private static final String PREFIX = "Not support ";
+
+    public NotSupportException(String message) {
+        super(PREFIX + message);
+    }
+
+    // 'message' is a String.format template (see HugeException)
+    public NotSupportException(String message, Object... args) {
+        super(PREFIX + message, args);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/id/EdgeId.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/EdgeId.java
new file mode 100644
index 0000000000..2b03e97d33
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/EdgeId.java
@@ -0,0 +1,350 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.id;
+
+import org.apache.hugegraph.perf.PerfUtil.Watched;
+import org.apache.hugegraph.testutil.Assert;
+import org.apache.hugegraph.util.E;
+
+import org.apache.hugegraph.exception.NotFoundException;
+import org.apache.hugegraph.type.HugeType;
+import org.apache.hugegraph.type.define.Directions;
+import org.apache.hugegraph.type.define.HugeKeys;
+import org.apache.hugegraph.util.StringEncoding;
+
+/**
+ * Class used to format and parse id of edge, the edge id consists of:
+ * EdgeId = { source-vertex-id > direction > parentEdgeLabelId > subEdgeLabelId
+ * >sortKeys > target-vertex-id }
+ * NOTE:
+ * 1. for edges with edgeLabel-type=NORMAL,edgelabelId=parentEdgeLabelId=subEdgeLabelId,
+ * for edges with edgeLabel type=PARENT,edgelabelId = subEdgeLabelId ,
+ * parentEdgeLabelId = edgelabelId.fatherId
+ *
+ * 2.if we use `entry.type()` which is IN or OUT as a part of id,
+ * an edge's id will be different due to different directions (belongs
+ * to 2 owner vertex)
+ */
+public class EdgeId implements Id {
+
+    // Order of the id parts as stored in the backend
+    public static final HugeKeys[] KEYS = new HugeKeys[] {
+            HugeKeys.OWNER_VERTEX,
+            HugeKeys.DIRECTION,
+            HugeKeys.LABEL,
+            HugeKeys.SUB_LABEL,
+            HugeKeys.SORT_VALUES,
+            HugeKeys.OTHER_VERTEX
+    };
+
+    private final Id ownerVertexId;
+    private final Directions direction;
+    private final Id edgeLabelId;
+    private final Id subLabelId;
+    private final String sortValues;
+    private final Id otherVertexId;
+
+    // Whether the string form keeps the direction (6 parts vs 5 parts)
+    private final boolean directed;
+    // Lazily built string form, see asString()
+    private String cache;
+
+    public EdgeId(Id ownerVertexId, Directions direction, Id edgeLabelId,
+                  Id subLabelId, String sortValues,
+                  Id otherVertexId) {
+        this(ownerVertexId, direction, edgeLabelId,
+             subLabelId, sortValues, otherVertexId, false);
+    }
+
+    public EdgeId(Id ownerVertexId, Directions direction, Id edgeLabelId,
+                  Id subLabelId, String sortValues,
+                  Id otherVertexId, boolean directed) {
+        this.ownerVertexId = ownerVertexId;
+        this.direction = direction;
+        this.edgeLabelId = edgeLabelId;
+        this.sortValues = sortValues;
+        this.subLabelId = subLabelId;
+        this.otherVertexId = otherVertexId;
+        this.directed = directed;
+        this.cache = null;
+    }
+
+    /** Same edge viewed from the other vertex (direction flipped). */
+    @Watched
+    public EdgeId switchDirection() {
+        Directions direction = this.direction.opposite();
+        return new EdgeId(this.otherVertexId, direction, this.edgeLabelId,
+                          this.subLabelId, this.sortValues, this.ownerVertexId,
+                          this.directed);
+    }
+
+    /** Copy of this id with the given 'directed' serialization mode. */
+    public EdgeId directed(boolean directed) {
+        return new EdgeId(this.ownerVertexId, this.direction, this.edgeLabelId,
+                          this.subLabelId, this.sortValues, this.otherVertexId,
+                          directed);
+    }
+
+    private Id sourceVertexId() {
+        return this.direction == Directions.OUT ?
+               this.ownerVertexId :
+               this.otherVertexId;
+    }
+
+    private Id targetVertexId() {
+        return this.direction == Directions.OUT ?
+               this.otherVertexId :
+               this.ownerVertexId;
+    }
+
+    public Id subLabelId() {
+        return this.subLabelId;
+    }
+
+    public Id ownerVertexId() {
+        return this.ownerVertexId;
+    }
+
+    public Id edgeLabelId() {
+        return this.edgeLabelId;
+    }
+
+    public Directions direction() {
+        return this.direction;
+    }
+
+    public byte directionCode() {
+        return directionToCode(this.direction);
+    }
+
+    public String sortValues() {
+        return this.sortValues;
+    }
+
+    public Id otherVertexId() {
+        return this.otherVertexId;
+    }
+
+    @Override
+    public Object asObject() {
+        return this.asString();
+    }
+
+    @Override
+    public String asString() {
+        if (this.cache != null) {
+            return this.cache;
+        }
+        if (this.directed) {
+            // 6-part form: owner vertex plus an explicit direction part
+            this.cache = SplicingIdGenerator.concat(
+                         IdUtil.writeString(this.ownerVertexId),
+                         this.direction.type().string(),
+                         IdUtil.writeLong(this.edgeLabelId),
+                         IdUtil.writeLong(this.subLabelId),
+                         this.sortValues,
+                         IdUtil.writeString(this.otherVertexId));
+        } else {
+            // 5-part form: normalized to source->target so the id is the
+            // same regardless of which endpoint owns the edge entry
+            this.cache = SplicingIdGenerator.concat(
+                         IdUtil.writeString(this.sourceVertexId()),
+                         IdUtil.writeLong(this.edgeLabelId),
+                         IdUtil.writeLong(this.subLabelId),
+                         this.sortValues,
+                         IdUtil.writeString(this.targetVertexId()));
+        }
+        return this.cache;
+    }
+
+    @Override
+    public long asLong() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public byte[] asBytes() {
+        return StringEncoding.encode(this.asString());
+    }
+
+    @Override
+    public int length() {
+        return this.asString().length();
+    }
+
+    @Override
+    public IdType type() {
+        return IdType.EDGE;
+    }
+
+    @Override
+    public int compareTo(Id other) {
+        return this.asString().compareTo(other.asString());
+    }
+
+    @Override
+    public int hashCode() {
+        if (this.directed) {
+            return this.ownerVertexId.hashCode() ^
+                   this.direction.hashCode() ^
+                   this.edgeLabelId.hashCode() ^
+                   this.subLabelId.hashCode() ^
+                   this.sortValues.hashCode() ^
+                   this.otherVertexId.hashCode();
+        } else {
+            return this.sourceVertexId().hashCode() ^
+                   this.edgeLabelId.hashCode() ^
+                   this.subLabelId.hashCode() ^
+                   this.sortValues.hashCode() ^
+                   this.targetVertexId().hashCode();
+        }
+    }
+
+    // NOTE(review): comparison mode depends on this.directed only, so
+    // equals() between a directed and an undirected id may be asymmetric
+    // -- confirm callers never mix the two modes in one collection
+    @Override
+    public boolean equals(Object object) {
+        if (!(object instanceof EdgeId)) {
+            return false;
+        }
+        EdgeId other = (EdgeId) object;
+        if (this.directed) {
+            return this.ownerVertexId.equals(other.ownerVertexId) &&
+                   this.direction == other.direction &&
+                   this.edgeLabelId.equals(other.edgeLabelId) &&
+                   this.sortValues.equals(other.sortValues) &&
+                   this.subLabelId.equals(other.subLabelId) &&
+                   this.otherVertexId.equals(other.otherVertexId);
+        } else {
+            return this.sourceVertexId().equals(other.sourceVertexId()) &&
+                   this.edgeLabelId.equals(other.edgeLabelId) &&
+                   this.sortValues.equals(other.sortValues) &&
+                   this.subLabelId.equals(other.subLabelId) &&
+                   this.targetVertexId().equals(other.targetVertexId());
+        }
+    }
+
+    @Override
+    public String toString() {
+        return this.asString();
+    }
+
+    public static byte directionToCode(Directions direction) {
+        return direction.type().code();
+    }
+
+    public static Directions directionFromCode(byte code) {
+        return (code == HugeType.EDGE_OUT.code()) ? Directions.OUT : Directions.IN;
+    }
+
+    public static boolean isOutDirectionFromCode(byte code) {
+        return code == HugeType.EDGE_OUT.code();
+    }
+
+    public static EdgeId parse(String id) throws NotFoundException {
+        return parse(id, false);
+    }
+
+    /**
+     * Parse an edge id string in 5-part (undirected, assumed OUT) or
+     * 6-part (directed) form.
+     *
+     * @param returnNullIfError return null instead of throwing on bad input
+     * @throws NotFoundException if the id is malformed (unless suppressed)
+     */
+    public static EdgeId parse(String id, boolean returnNullIfError)
+                               throws NotFoundException {
+        String[] idParts = SplicingIdGenerator.split(id);
+        if (!(idParts.length == 5 || idParts.length == 6)) {
+            if (returnNullIfError) {
+                return null;
+            }
+            throw new NotFoundException("Edge id must be formatted as 5~6 " +
+                                        "parts, but got %s parts: '%s'",
+                                        idParts.length, id);
+        }
+        try {
+            if (idParts.length == 5) {
+                Id ownerVertexId = IdUtil.readString(idParts[0]);
+                Id edgeLabelId = IdUtil.readLong(idParts[1]);
+                Id subLabelId = IdUtil.readLong(idParts[2]);
+                String sortValues = idParts[3];
+                Id otherVertexId = IdUtil.readString(idParts[4]);
+                return new EdgeId(ownerVertexId, Directions.OUT, edgeLabelId,
+                                  subLabelId, sortValues, otherVertexId);
+            } else {
+                assert idParts.length == 6;
+                Id ownerVertexId = IdUtil.readString(idParts[0]);
+                HugeType direction = HugeType.fromString(idParts[1]);
+                Id edgeLabelId = IdUtil.readLong(idParts[2]);
+                Id subLabelId = IdUtil.readLong(idParts[3]);
+                String sortValues = idParts[4];
+                Id otherVertexId = IdUtil.readString(idParts[5]);
+                return new EdgeId(ownerVertexId, Directions.convert(direction),
+                                  edgeLabelId, subLabelId,
+                                  sortValues, otherVertexId);
+            }
+        } catch (Throwable e) {
+            if (returnNullIfError) {
+                return null;
+            }
+            throw new NotFoundException("Invalid format of edge id '%s'",
+                                        e, id);
+        }
+    }
+
+    /** Parse the 5-part stored (backend) string form of an edge id. */
+    public static Id parseStoredString(String id) {
+        String[] idParts = split(id);
+        E.checkArgument(idParts.length == 5, "Invalid id format: %s", id);
+        Id ownerVertexId = IdUtil.readStoredString(idParts[0]);
+        Id edgeLabelId = IdGenerator.ofStoredString(idParts[1], IdType.LONG);
+        Id subLabelId = IdGenerator.ofStoredString(idParts[2], IdType.LONG);
+        String sortValues = idParts[3];
+        Id otherVertexId = IdUtil.readStoredString(idParts[4]);
+        return new EdgeId(ownerVertexId, Directions.OUT, edgeLabelId,
+                          subLabelId, sortValues, otherVertexId);
+    }
+
+    /** Serialize an EdgeId to its 5-part stored (backend) string form. */
+    public static String asStoredString(Id id) {
+        EdgeId eid = (EdgeId) id;
+        return SplicingIdGenerator.concat(
+               IdUtil.writeStoredString(eid.sourceVertexId()),
+               IdGenerator.asStoredString(eid.edgeLabelId()),
+               IdGenerator.asStoredString(eid.subLabelId()),
+               eid.sortValues(),
+               IdUtil.writeStoredString(eid.targetVertexId()));
+    }
+
+    public static String concat(String... ids) {
+        return SplicingIdGenerator.concat(ids);
+    }
+
+    public static String[] split(Id id) {
+        return EdgeId.split(id.asString());
+    }
+
+    public static String[] split(String id) {
+        return SplicingIdGenerator.split(id);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/id/Id.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/Id.java
new file mode 100644
index 0000000000..aeb7810a9d
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/Id.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.id;
+
+import java.io.Serializable;
+
+import org.apache.hugegraph.util.E;
+
+/**
+ * Common abstraction for all graph/schema ids (long, uuid, string, edge).
+ * Ids are comparable across types: ordering is first by id type, then by
+ * value (see IdGenerator.compareType()).
+ *
+ * Fixed: extended raw {@code Comparable} instead of {@code Comparable<Id>};
+ * the implementations declare {@code @Override compareTo(Id)}, which only
+ * compiles against the generic supertype.
+ */
+public interface Id extends Comparable<Id>, Serializable {
+
+ public static final int UUID_LENGTH = 16;
+
+ /** The underlying value (String/Long/UUID/wrapped object). */
+ public Object asObject();
+
+ /** String form of the id value. */
+ public String asString();
+
+ /** Long form; may throw for non-numeric id types. */
+ public long asLong();
+
+ /** Byte form of the id value. */
+ public byte[] asBytes();
+
+ /** Length of the id content (chars or bytes, type-dependent). */
+ public int length();
+
+ public IdType type();
+
+ public default boolean number() {
+ return this.type() == IdType.LONG;
+ }
+
+ public default boolean uuid() {
+ return this.type() == IdType.UUID;
+ }
+
+ public default boolean string() {
+ return this.type() == IdType.STRING;
+ }
+
+ public default boolean edge() {
+ return this.type() == IdType.EDGE;
+ }
+
+ public enum IdType {
+
+ UNKNOWN,
+ LONG,
+ UUID,
+ STRING,
+ EDGE;
+
+ /** One-char prefix used in stored strings ('N' for UNKNOWN). */
+ public char prefix() {
+ if (this == UNKNOWN) {
+ return 'N';
+ }
+ return this.name().charAt(0);
+ }
+
+ /** Map a stored string's first char back to its IdType. */
+ public static IdType valueOfPrefix(String id) {
+ E.checkArgument(id != null && id.length() > 0,
+ "Invalid id '%s'", id);
+ switch (id.charAt(0)) {
+ case 'L':
+ return IdType.LONG;
+ case 'U':
+ return IdType.UUID;
+ case 'S':
+ return IdType.STRING;
+ case 'E':
+ return IdType.EDGE;
+ default:
+ return IdType.UNKNOWN;
+ }
+ }
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdGenerator.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdGenerator.java
new file mode 100644
index 0000000000..b6687262db
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdGenerator.java
@@ -0,0 +1,465 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.id;
+
+import org.apache.hugegraph.serializer.BytesBuffer;
+import org.apache.hugegraph.structure.BaseVertex;
+import org.apache.hugegraph.util.StringEncoding;
+import com.google.common.primitives.Longs;
+
+import org.apache.hugegraph.util.E;
+import org.apache.hugegraph.util.LongEncoding;
+import org.apache.hugegraph.util.NumericUtil;
+
+import java.nio.charset.Charset;
+import java.util.Objects;
+import java.util.UUID;
+
+public abstract class IdGenerator {
+
+ public static final Id ZERO = IdGenerator.of(0L);
+
+ public abstract Id generate(BaseVertex vertex);
+
+ /** Wrap a string as a StringId. */
+ public final static Id of(String id) {
+ return new StringId(id);
+ }
+
+ /** Wrap a UUID as a UuidId. */
+ public final static Id of(UUID id) {
+ return new UuidId(id);
+ }
+
+ /** Wrap a string as a UuidId when {@code uuid} is true, else StringId. */
+ public final static Id of(String id, boolean uuid) {
+ return uuid ? new UuidId(id) : new StringId(id);
+ }
+
+ /** Wrap a long as a LongId. */
+ public final static Id of(long id) {
+ return new LongId(id);
+ }
+
+ /**
+ * Wrap an arbitrary object: an Id is returned as-is, String/Number/UUID
+ * map to their dedicated id types, anything else becomes an ObjectId.
+ */
+ public static Id of(Object id) {
+ if (id instanceof Id) {
+ return (Id) id;
+ } else if (id instanceof String) {
+ return of((String) id);
+ } else if (id instanceof Number) {
+ return of(((Number) id).longValue());
+ } else if (id instanceof UUID) {
+ return of((UUID) id);
+ }
+ return new ObjectId(id);
+ }
+
+ /**
+ * Decode an id from raw bytes for the given type; EDGE is not
+ * supported here (throws AssertionError).
+ */
+ public final static Id of(byte[] bytes, Id.IdType type) {
+ switch (type) {
+ case LONG:
+ return new LongId(bytes);
+ case UUID:
+ return new UuidId(bytes);
+ case STRING:
+ return new StringId(bytes);
+ default:
+ throw new AssertionError("Invalid id type " + type);
+ }
+ }
+
+ /**
+ * Decode an id from its stored-string form; inverse of asStoredString():
+ * LONG uses signed base64, UUID base64-encoded bytes, STRING verbatim.
+ */
+ public final static Id ofStoredString(String id, Id.IdType type) {
+ switch (type) {
+ case LONG:
+ return of(LongEncoding.decodeSignedB64(id));
+ case UUID:
+ byte[] bytes = StringEncoding.decodeBase64(id);
+ return of(bytes, Id.IdType.UUID);
+ case STRING:
+ return of(id);
+ default:
+ throw new AssertionError("Invalid id type " + type);
+ }
+ }
+
+ /**
+ * Encode an id to its stored-string form; inverse of ofStoredString().
+ */
+ public final static String asStoredString(Id id) {
+ switch (id.type()) {
+ case LONG:
+ return LongEncoding.encodeSignedB64(id.asLong());
+ case UUID:
+ return StringEncoding.encodeBase64(id.asBytes());
+ case STRING:
+ return id.asString();
+ default:
+ throw new AssertionError("Invalid id type " + id.type());
+ }
+ }
+
+ /** Map an id instance to its IdType by concrete class. */
+ public final static Id.IdType idType(Id id) {
+ if (id instanceof LongId) {
+ return Id.IdType.LONG;
+ }
+ if (id instanceof UuidId) {
+ return Id.IdType.UUID;
+ }
+ if (id instanceof StringId) {
+ return Id.IdType.STRING;
+ }
+ if (id instanceof EdgeId) {
+ return Id.IdType.EDGE;
+ }
+ return Id.IdType.UNKNOWN;
+ }
+
+ // Order ids of different types by IdType ordinal (small ordinals, so
+ // the subtraction cannot overflow)
+ private final static int compareType(Id id1, Id id2) {
+ return idType(id1).ordinal() - idType(id2).ordinal();
+ }
+
+ /****************************** id defines ******************************/
+
+ /** String-valued id; content is the string itself, bytes are UTF-8. */
+ public static final class StringId implements Id {
+
+ private final String id;
+ private static final Charset CHARSET = Charset.forName("UTF-8");
+
+ public StringId(String id) {
+ E.checkArgument(!id.isEmpty(), "The id can't be empty");
+ this.id = id;
+ }
+
+ /** Decode from bytes via StringEncoding (no empty check here). */
+ public StringId(byte[] bytes) {
+ this.id = StringEncoding.decode(bytes);
+ }
+
+ @Override
+ public IdType type() {
+ return IdType.STRING;
+ }
+
+ @Override
+ public Object asObject() {
+ return this.id;
+ }
+
+ @Override
+ public String asString() {
+ return this.id;
+ }
+
+ // Throws NumberFormatException if the string is not a decimal long
+ @Override
+ public long asLong() {
+ return Long.parseLong(this.id);
+ }
+
+ @Override
+ public byte[] asBytes() {
+ return this.id.getBytes(CHARSET);
+ }
+
+ @Override
+ public int length() {
+ return this.id.length();
+ }
+
+ // Order by id type first, then lexicographically by string value
+ @Override
+ public int compareTo(Id other) {
+ int cmp = compareType(this, other);
+ if (cmp != 0) {
+ return cmp;
+ }
+ return this.id.compareTo(other.asString());
+ }
+
+ @Override
+ public int hashCode() {
+ return this.id.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof StringId)) {
+ return false;
+ }
+ return this.id.equals(((StringId) other).id);
+ }
+
+ @Override
+ public String toString() {
+ return this.id;
+ }
+ }
+
+ /**
+ * Long-valued id. Extends Number so it can interoperate with numeric
+ * comparisons; equals() also accepts numeric-looking strings.
+ */
+ public static final class LongId extends Number implements Id {
+
+ private static final long serialVersionUID = -7732461469037400190L;
+
+ private final long id;
+
+ public LongId(long id) {
+ this.id = id;
+ }
+
+ /** Decode from 8 bytes via NumericUtil. */
+ public LongId(byte[] bytes) {
+ this.id = NumericUtil.bytesToLong(bytes);
+ }
+
+ @Override
+ public IdType type() {
+ return IdType.LONG;
+ }
+
+ @Override
+ public Object asObject() {
+ return this.id;
+ }
+
+ @Override
+ public String asString() {
+ // TODO: encode with base64
+ return Long.toString(this.id);
+ }
+
+ @Override
+ public long asLong() {
+ return this.id;
+ }
+
+ @Override
+ public byte[] asBytes() {
+ return Longs.toByteArray(this.id);
+ }
+
+ @Override
+ public int length() {
+ return Long.BYTES;
+ }
+
+ // Order by id type first, then numerically
+ @Override
+ public int compareTo(Id other) {
+ int cmp = compareType(this, other);
+ if (cmp != 0) {
+ return cmp;
+ }
+ return Long.compare(this.id, other.asLong());
+ }
+
+ @Override
+ public int hashCode() {
+ return Long.hashCode(this.id);
+ }
+
+ /**
+ * Equal to any Number with the same long value, or to any other
+ * object whose toString() is a digits/dots string parsing to it.
+ * Fixed: equals() previously threw instead of returning false for
+ * null (NPE via other.toString()) and for strings like "", "." or
+ * "1.2.3" that pass the per-char check but fail parseDouble().
+ */
+ @Override
+ public boolean equals(Object other) {
+ if (other instanceof Number) {
+ return this.id == ((Number) other).longValue();
+ }
+ if (other == null || !isDigitalObject(other)) {
+ return false;
+ }
+ try {
+ return this.id == (long) Double.parseDouble(other.toString());
+ } catch (NumberFormatException ignored) {
+ // e.g. "." or "1.2.3": chars pass but the value can't parse
+ return false;
+ }
+ }
+
+ // True if toString() is non-empty and contains only digits and '.'
+ // (renamed from the typo'd idDigitalObject)
+ private static boolean isDigitalObject(Object object) {
+ String string = object.toString();
+ if (string.isEmpty()) {
+ return false;
+ }
+ for (int i = string.length(); --i >= 0; ) {
+ char c = string.charAt(i);
+ if (!Character.isDigit(c) && '.' != c) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(this.id);
+ }
+
+ @Override
+ public int intValue() {
+ return (int) this.id;
+ }
+
+ @Override
+ public long longValue() {
+ return this.id;
+ }
+
+ @Override
+ public float floatValue() {
+ return this.id;
+ }
+
+ @Override
+ public double doubleValue() {
+ return this.id;
+ }
+ }
+
+ /** UUID-valued id; 16-byte binary form is MSB then LSB via BytesBuffer. */
+ public static final class UuidId implements Id {
+
+ private final UUID uuid;
+
+ public UuidId(String string) {
+ this(StringEncoding.uuid(string));
+ }
+
+ public UuidId(byte[] bytes) {
+ this(fromBytes(bytes));
+ }
+
+ public UuidId(UUID uuid) {
+ E.checkArgument(uuid != null, "The uuid can't be null");
+ this.uuid = uuid;
+ }
+
+ @Override
+ public IdType type() {
+ return IdType.UUID;
+ }
+
+ @Override
+ public Object asObject() {
+ return this.uuid;
+ }
+
+ @Override
+ public String asString() {
+ return this.uuid.toString();
+ }
+
+ // A UUID has no meaningful long form
+ @Override
+ public long asLong() {
+ throw new UnsupportedOperationException();
+ }
+
+ // 16 bytes: most-significant 8 first, then least-significant 8
+ @Override
+ public byte[] asBytes() {
+ BytesBuffer buffer = BytesBuffer.allocate(16);
+ buffer.writeLong(this.uuid.getMostSignificantBits());
+ buffer.writeLong(this.uuid.getLeastSignificantBits());
+ return buffer.bytes();
+ }
+
+ // Inverse of asBytes()
+ private static UUID fromBytes(byte[] bytes) {
+ E.checkArgument(bytes != null, "The UUID can't be null");
+ BytesBuffer buffer = BytesBuffer.wrap(bytes);
+ long high = buffer.readLong();
+ long low = buffer.readLong();
+ return new UUID(high, low);
+ }
+
+ @Override
+ public int length() {
+ return UUID_LENGTH;
+ }
+
+ // NOTE(review): after the type check, other is cast to UuidId; a
+ // non-UuidId with UUID type would throw CCE -- presumably impossible
+ // since idType() maps by concrete class, but worth confirming
+ @Override
+ public int compareTo(Id other) {
+ E.checkNotNull(other, "compare id");
+ int cmp = compareType(this, other);
+ if (cmp != 0) {
+ return cmp;
+ }
+ return this.uuid.compareTo(((UuidId) other).uuid);
+ }
+
+ @Override
+ public int hashCode() {
+ return this.uuid.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof UuidId)) {
+ return false;
+ }
+ return this.uuid.equals(((UuidId) other).uuid);
+ }
+
+ @Override
+ public String toString() {
+ return this.uuid.toString();
+ }
+ }
+
+ /**
+ * This class is just used by backend store for wrapper object as Id
+ */
+ public static final class ObjectId implements Id {
+
+ private final Object object;
+
+ public ObjectId(Object object) {
+ E.checkNotNull(object, "object");
+ this.object = object;
+ }
+
+ @Override
+ public IdType type() {
+ return IdType.UNKNOWN;
+ }
+
+ @Override
+ public Object asObject() {
+ return this.object;
+ }
+
+ // An arbitrary wrapped object has no defined string/long/byte form
+ // or ordering; only asObject/hashCode/equals/toString are supported
+ @Override
+ public String asString() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public long asLong() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public byte[] asBytes() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int length() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int compareTo(Id o) {
+ throw new UnsupportedOperationException();
+ }
+
+ // Delegates to the wrapped object, consistent with equals() below
+ @Override
+ public int hashCode() {
+ return this.object.hashCode();
+ }
+
+ @Override
+ public boolean equals(Object other) {
+ if (!(other instanceof ObjectId)) {
+ return false;
+ }
+ return Objects.equals(this.object, ((ObjectId) other).object);
+ }
+
+ @Override
+ public String toString() {
+ return this.object.toString();
+ }
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdUtil.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdUtil.java
new file mode 100644
index 0000000000..b394c79a12
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/id/IdUtil.java
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.id;
+
+import java.nio.ByteBuffer;
+
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.hugegraph.serializer.BytesBuffer;
+
+public final class IdUtil {
+
+ /**
+ * Encode an id to "type prefix + stored string", where the one-char
+ * prefix (L/U/S/E) records the id type; inverse of readStoredString().
+ */
+ public static String writeStoredString(Id id) {
+ String idString;
+ switch (id.type()) {
+ case LONG:
+ case STRING:
+ case UUID:
+ idString = IdGenerator.asStoredString(id);
+ break;
+ case EDGE:
+ idString = EdgeId.asStoredString(id);
+ break;
+ default:
+ throw new AssertionError("Invalid id type " + id.type());
+ }
+ return id.type().prefix() + idString;
+ }
+
+ /**
+ * Decode an id from its prefixed stored-string form; inverse of
+ * writeStoredString(). The first char selects the id type, the rest
+ * is the type-specific content.
+ */
+ public static Id readStoredString(String id) {
+ Id.IdType type = Id.IdType.valueOfPrefix(id);
+ String idContent = id.substring(1);
+ switch (type) {
+ case LONG:
+ case STRING:
+ case UUID:
+ return IdGenerator.ofStoredString(idContent, type);
+ case EDGE:
+ return EdgeId.parseStoredString(idContent);
+ default:
+ throw new IllegalArgumentException("Invalid id: " + id);
+ }
+ }
+
+ /** Serialize an id to a ByteBuffer; inverse of readBinString(). */
+ public static Object writeBinString(Id id) {
+ // Edge ids use the fixed edge-id buffer size; others need
+ // content length + 1 leading meta byte
+ int len = id.edge() ? BytesBuffer.BUF_EDGE_ID : id.length() + 1;
+ BytesBuffer buffer = BytesBuffer.allocate(len).writeId(id);
+ buffer.forReadWritten();
+ return buffer.asByteBuffer();
+ }
+
+ /** Deserialize an id from a ByteBuffer written by writeBinString(). */
+ public static Id readBinString(Object id) {
+ BytesBuffer buffer = BytesBuffer.wrap((ByteBuffer) id);
+ return buffer.readId();
+ }
+
+ /** Serialize an id to a byte array; inverse of fromBytes(). */
+ public static byte[] asBytes(Id id) {
+ int len = id.edge() ? BytesBuffer.BUF_EDGE_ID : id.length() + 1;
+ BytesBuffer buffer = BytesBuffer.allocate(len).writeId(id);
+ return buffer.bytes();
+ }
+
+ /** Deserialize an id from a byte array written by asBytes(). */
+ public static Id fromBytes(byte[] bytes) {
+ BytesBuffer buffer = BytesBuffer.wrap(bytes);
+ return buffer.readId();
+ }
+
+
+ /**
+ * Encode an id to a human-readable string with a one-char type prefix;
+ * inverse of readString(). Unlike writeStoredString() this uses the
+ * plain asString() form instead of the compact stored encoding.
+ */
+ public static String writeString(Id id) {
+ String idString = id.asString();
+ StringBuilder sb = new StringBuilder(1 + idString.length());
+ sb.append(id.type().prefix()).append(idString);
+ return sb.toString();
+ }
+
+ /** Decode an id written by writeString(). */
+ public static Id readString(String id) {
+ Id.IdType type = Id.IdType.valueOfPrefix(id);
+ String idContent = id.substring(1);
+ switch (type) {
+ case LONG:
+ return IdGenerator.of(Long.parseLong(idContent));
+ case STRING:
+ case UUID:
+ return IdGenerator.of(idContent, type == Id.IdType.UUID);
+ case EDGE:
+ return EdgeId.parse(idContent);
+ default:
+ throw new IllegalArgumentException("Invalid id: " + id);
+ }
+ }
+
+ /** Encode a numeric id as its plain decimal string. */
+ public static String writeLong(Id id) {
+ return String.valueOf(id.asLong());
+ }
+
+ /** Decode a numeric id from its plain decimal string. */
+ public static Id readLong(String id) {
+ return IdGenerator.of(Long.parseLong(id));
+ }
+
+ /**
+ * Join values with the splitor char, prefixing any splitor embedded
+ * inside a value with the escape char; inverse of unescape().
+ * @param splitor delimiter inserted between values
+ * @param escape char placed before embedded splitors
+ * @param values the raw values to join
+ * @return the joined, escaped string
+ */
+ public static String escape(char splitor, char escape, String... values) {
+ // Rough capacity: total value lengths plus delimiters and slack
+ int length = values.length + 4;
+ for (String value : values) {
+ length += value.length();
+ }
+ StringBuilder escaped = new StringBuilder(length);
+ // Do escape for every item in values
+ for (String value : values) {
+ if (escaped.length() > 0) {
+ escaped.append(splitor);
+ }
+
+ // Fast path: nothing to escape in this value
+ if (value.indexOf(splitor) == -1) {
+ escaped.append(value);
+ continue;
+ }
+
+ // Do escape for current item
+ for (int i = 0, n = value.length(); i < n; i++) {
+ char ch = value.charAt(i);
+ if (ch == splitor) {
+ escaped.append(escape);
+ }
+ escaped.append(ch);
+ }
+ }
+ return escaped.toString();
+ }
+
+ public static String[] unescape(String id, String splitor, String escape) {
+ /*
+ * Note that the `splitor`/`escape` maybe special characters in regular
+ * expressions, but this is a frequently called method, for faster
+ * execution, we forbid the use of special characters as delimiter
+ * or escape sign.
+ * The `limit` param -1 in split method can ensure empty string be
+ * splited to a part.
+ */
+ String[] parts = id.split("(?';
+ private static final char ID_SPLITOR = ':';
+ private static final char NAME_SPLITOR = '!';
+
+ public static final String ESCAPE_STR = String.valueOf(ESCAPE);
+ public static final String IDS_SPLITOR_STR = String.valueOf(IDS_SPLITOR);
+ public static final String ID_SPLITOR_STR = String.valueOf(ID_SPLITOR);
+
+ /****************************** id generate ******************************/
+
+ /**
+ * Generate a string id for a vertex by splicing its schema label id
+ * and its name with ID_SPLITOR.
+ * @param vertex the vertex to generate an id for
+ * @return the spliced string id
+ */
+ @Override
+ public Id generate(BaseVertex vertex) {
+ /*
+ * Hash for row-key which will be evenly distributed.
+ * We can also use LongEncoding.encode() to encode the int/long hash
+ * if needed.
+ * id = String.format("%s%s%s", HashUtil.hash(id), ID_SPLITOR, id);
+ */
+ // TODO: use binary Id with binary fields instead of string id
+ return splicing(vertex.schemaLabel().id().asString(), vertex.name());
+ }
+
+ /**
+ * Concat multiple ids into one composite id with IDS_SPLITOR.
+ * @param ids the string id values to be concatenated
+ * @return concatenated string value
+ */
+ public static String concat(String... ids) {
+ // NOTE: must support string id when using this method
+ return IdUtil.escape(IDS_SPLITOR, ESCAPE, ids);
+ }
+
+ /**
+ * Split a composite id into multiple ids with IDS_SPLITOR.
+ * @param ids the string id value to be split
+ * @return split string values
+ */
+ public static String[] split(String ids) {
+ return IdUtil.unescape(ids, IDS_SPLITOR_STR, ESCAPE_STR);
+ }
+
+ /**
+ * Concat property values with NAME_SPLITOR.
+ * Fixed: the parameter type was garbled to the unparseable
+ * {@code List>}; restored the wildcard {@code List<?>}.
+ * @param values the property values to be concatenated
+ * @return concatenated string value
+ */
+ public static String concatValues(List<?> values) {
+ // Convert the object list to string array
+ int valuesSize = values.size();
+ String[] parts = new String[valuesSize];
+ for (int i = 0; i < valuesSize; i++) {
+ parts[i] = values.get(i).toString();
+ }
+ return IdUtil.escape(NAME_SPLITOR, ESCAPE, parts);
+ }
+
+ /**
+ * Concat property values with NAME_SPLITOR (varargs convenience
+ * overload delegating to the List version).
+ * @param values the property values to be concatenated
+ * @return concatenated string value
+ */
+ public static String concatValues(Object... values) {
+ return concatValues(Arrays.asList(values));
+ }
+
+ /**
+ * Concat multiple parts into a single id with ID_SPLITOR.
+ * @param parts the string id values to be spliced
+ * @return spliced id object
+ */
+ public static Id splicing(String... parts) {
+ String escaped = IdUtil.escape(ID_SPLITOR, ESCAPE, parts);
+ return IdGenerator.of(escaped);
+ }
+
+ /**
+ * Like splicing(), but joins parts verbatim without escaping embedded
+ * ID_SPLITOR chars, so the result is NOT safely reversible by parse().
+ */
+ public static Id splicingWithNoEscape(String... parts) {
+ // NOTE(review): local is named 'escaped' for symmetry only; the
+ // value is deliberately not escaped here
+ String escaped = String.join(ID_SPLITOR_STR, parts);
+ return IdGenerator.of(escaped);
+ }
+
+ /**
+ * Wrap an id into a BinaryId (the id plus its serialized bytes);
+ * returns the argument unchanged if it is already a BinaryId.
+ */
+ public static Id generateBinaryId(Id id) {
+ if (id instanceof BinaryId) {
+ return id;
+ }
+ // 1 extra byte for the id meta written by writeId()
+ BytesBuffer buffer = BytesBuffer.allocate(1 + id.length());
+ BinaryId binaryId = new BinaryId(buffer.writeId(id).bytes(), id);
+ return binaryId;
+ }
+
+ /**
+ * Parse a single id into multiple parts with ID_SPLITOR; inverse of
+ * splicing().
+ * @param id the id object to be parsed
+ * @return parsed string id parts
+ */
+ public static String[] parse(Id id) {
+ return IdUtil.unescape(id.asString(), ID_SPLITOR_STR, ESCAPE_STR);
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/options/AuthOptions.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/options/AuthOptions.java
new file mode 100644
index 0000000000..3ae732e2e2
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/options/AuthOptions.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.options;
+
+import org.apache.hugegraph.config.ConfigListOption;
+import org.apache.hugegraph.config.ConfigOption;
+import org.apache.hugegraph.config.OptionHolder;
+
+import java.security.SecureRandom;
+import java.util.Base64;
+
+import static org.apache.hugegraph.config.OptionChecker.*;
+
+/**
+ * Config options for authentication and authorization.
+ * Fixed: the generic type arguments of every option field had been
+ * stripped (raw {@code ConfigOption}); restored from each option's
+ * checker and default value.
+ */
+public class AuthOptions extends OptionHolder {
+
+ private AuthOptions() {
+ super();
+ }
+
+ private static volatile AuthOptions instance;
+
+ /** Lazily create and register the singleton option holder. */
+ public static synchronized AuthOptions instance() {
+ if (instance == null) {
+ instance = new AuthOptions();
+ instance.registerOptions();
+ }
+ return instance;
+ }
+
+ // NOTE(review): shipping a hard-coded default token secret is unsafe;
+ // deployments must override auth.token_secret (a random key could be
+ // produced with the currently unused generateRandomBase64Key() below)
+ public static final ConfigOption<String> AUTH_TOKEN_SECRET =
+ new ConfigOption<>(
+ "auth.token_secret",
+ "Secret key of HS256 algorithm.",
+ disallowEmpty(),
+ "FXQXbJtbCLxODc6tGci732pkH1cyf8Qg"
+ );
+
+ public static final ConfigOption<Double> AUTH_AUDIT_LOG_RATE =
+ new ConfigOption<>(
+ "auth.audit_log_rate",
+ "The max rate of audit log output per user, " +
+ "default value is 1000 records per second.",
+ rangeDouble(0.0, Double.MAX_VALUE),
+ 1000.0
+ );
+
+ public static final ConfigOption<Long> AUTH_PROXY_CACHE_EXPIRE =
+ new ConfigOption<>(
+ "auth.proxy_cache_expire",
+ "The expiration time in seconds of auth cache in " +
+ "auth client.",
+ rangeInt(0L, Long.MAX_VALUE),
+ (1 * 60L)
+ );
+
+ public static final ConfigOption<Long> AUTH_CACHE_CAPACITY =
+ new ConfigOption<>(
+ "auth.cache_capacity",
+ "The max cache capacity of each auth cache item.",
+ rangeInt(0L, Long.MAX_VALUE),
+ (1024 * 10L)
+ );
+
+ public static final ConfigOption<String> AUTHENTICATOR =
+ new ConfigOption<>(
+ "auth.authenticator",
+ "The class path of authenticator implementation. " +
+ "e.g., org.apache.hugegraph.auth.StandardAuthenticator, " +
+ "or org.apache.hugegraph.auth.ConfigAuthenticator.",
+ null,
+ ""
+ );
+
+ public static final ConfigOption<String> AUTH_GRAPH_STORE =
+ new ConfigOption<>(
+ "auth.graph_store",
+ "The name of graph used to store authentication information, " +
+ "like users, only for org.apache.hugegraph.auth.StandardAuthenticator.",
+ disallowEmpty(),
+ "hugegraph"
+ );
+
+ public static final ConfigOption<String> AUTH_ADMIN_TOKEN =
+ new ConfigOption<>(
+ "auth.admin_token",
+ "Token for administrator operations, " +
+ "only for org.apache.hugegraph.auth.ConfigAuthenticator.",
+ disallowEmpty(),
+ "162f7848-0b6d-4faf-b557-3a0797869c55"
+ );
+
+ public static final ConfigListOption<String> AUTH_USER_TOKENS =
+ new ConfigListOption<>(
+ "auth.user_tokens",
+ "The map of user tokens with name and password, " +
+ "only for org.apache.hugegraph.auth.ConfigAuthenticator.",
+ disallowEmpty(),
+ "hugegraph:9fd95c9c-711b-415b-b85f-d4df46ba5c31"
+ );
+
+ public static final ConfigOption<String> AUTH_REMOTE_URL =
+ new ConfigOption<>(
+ "auth.remote_url",
+ "If the address is empty, it provide auth service, " +
+ "otherwise it is auth client and also provide auth service " +
+ "through rpc forwarding. The remote url can be set to " +
+ "multiple addresses, which are concat by ','.",
+ null,
+ ""
+ );
+
+ public static final ConfigOption<Long> AUTH_CACHE_EXPIRE =
+ new ConfigOption<>(
+ "auth.cache_expire",
+ "The expiration time in seconds of auth cache in " +
+ "auth client and auth server.",
+ rangeInt(0L, Long.MAX_VALUE),
+ (60 * 10L)
+ );
+
+ public static final ConfigOption<Long> AUTH_TOKEN_EXPIRE =
+ new ConfigOption<>(
+ "auth.token_expire",
+ "The expiration time in seconds after token created",
+ rangeInt(0L, Long.MAX_VALUE),
+ (3600 * 24L)
+ );
+
+ // NOTE(review): currently unused; see AUTH_TOKEN_SECRET note above
+ private static String generateRandomBase64Key() {
+ SecureRandom random = new SecureRandom();
+ // 32 bytes for HMAC-SHA256
+ byte[] bytes = new byte[32];
+ random.nextBytes(bytes);
+ return Base64.getEncoder().encodeToString(bytes);
+ }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/options/CoreOptions.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/options/CoreOptions.java
new file mode 100644
index 0000000000..70fd163e58
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/options/CoreOptions.java
@@ -0,0 +1,715 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.options;
+
+import static org.apache.hugegraph.config.OptionChecker.allowValues;
+import static org.apache.hugegraph.config.OptionChecker.disallowEmpty;
+import static org.apache.hugegraph.config.OptionChecker.nonNegativeInt;
+import static org.apache.hugegraph.config.OptionChecker.positiveInt;
+import static org.apache.hugegraph.config.OptionChecker.rangeInt;
+import static org.apache.hugegraph.query.Query.COMMIT_BATCH;
+
+import org.apache.hugegraph.config.ConfigConvOption;
+import org.apache.hugegraph.config.ConfigOption;
+import org.apache.hugegraph.config.OptionHolder;
+import org.apache.hugegraph.query.Query;
+import org.apache.hugegraph.type.define.CollectionType;
+import org.apache.hugegraph.util.Bytes;
+
+public class CoreOptions extends OptionHolder {
+
+ // Used as default for thread-count options below
+ public static final int CPUS = Runtime.getRuntime().availableProcessors();
+
+ private CoreOptions() {
+ super();
+ }
+
+ private static volatile CoreOptions instance;
+
+ /** Lazily create and register the singleton option holder. */
+ public static synchronized CoreOptions instance() {
+ if (instance == null) {
+ instance = new CoreOptions();
+ // Should initialize all static members first, then register.
+ instance.registerOptions();
+ }
+ return instance;
+ }
+
+ // Fixed: the generic type arguments of all option fields had been
+ // stripped (raw ConfigOption); restored from checker + default value.
+ public static final ConfigOption<String> GREMLIN_GRAPH =
+ new ConfigOption<>(
+ "gremlin.graph",
+ "Gremlin entrance to create graph.",
+ disallowEmpty(),
+ "org.apache.hugegraph.HugeFactory"
+ );
+
+ public static final ConfigOption<String> BACKEND =
+ new ConfigOption<>(
+ "backend",
+ "The data store type.",
+ disallowEmpty(),
+ "memory"
+ );
+
+ public static final ConfigOption<String> STORE =
+ new ConfigOption<>(
+ "store",
+ "The database name like Cassandra Keyspace.",
+ disallowEmpty(),
+ "hugegraph"
+ );
+
+ public static final ConfigOption<String> STORE_GRAPH =
+ new ConfigOption<>(
+ "store.graph",
+ "The graph table name, which store vertex, edge and property.",
+ disallowEmpty(),
+ "g"
+ );
+
+ public static final ConfigOption<String> SERIALIZER =
+ new ConfigOption<>(
+ "serializer",
+ "The serializer for backend store, like: text/binary/cassandra.",
+ disallowEmpty(),
+ "text"
+ );
+
+ public static final ConfigOption<Boolean> RAFT_MODE =
+ new ConfigOption<>(
+ "raft.mode",
+ "Whether the backend storage works in raft mode.",
+ disallowEmpty(),
+ false
+ );
+
+ public static final ConfigOption<Boolean> RAFT_SAFE_READ =
+ new ConfigOption<>(
+ "raft.safe_read",
+ "Whether to use linearly consistent read.",
+ disallowEmpty(),
+ false
+ );
+
+ public static final ConfigOption<String> RAFT_PATH =
+ new ConfigOption<>(
+ "raft.path",
+ "The log path of current raft node.",
+ disallowEmpty(),
+ "./raftlog"
+ );
+
+ public static final ConfigOption<Boolean> RAFT_REPLICATOR_PIPELINE =
+ new ConfigOption<>(
+ "raft.use_replicator_pipeline",
+ "Whether to use replicator line, when turned on it " +
+ "multiple logs can be sent in parallel, and the next log " +
+ "doesn't have to wait for the ack message of the current " +
+ "log to be sent.",
+ disallowEmpty(),
+ true
+ );
+
+ public static final ConfigOption<Integer> RAFT_ELECTION_TIMEOUT =
+ new ConfigOption<>(
+ "raft.election_timeout",
+ "Timeout in milliseconds to launch a round of election.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 10000
+ );
+
+ public static final ConfigOption<Integer> RAFT_SNAPSHOT_INTERVAL =
+ new ConfigOption<>(
+ "raft.snapshot_interval",
+ "The interval in seconds to trigger snapshot save.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 3600
+ );
+
+ public static final ConfigOption<Integer> RAFT_SNAPSHOT_THREADS =
+ new ConfigOption<>(
+ "raft.snapshot_threads",
+ "The thread number used to do snapshot.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 4
+ );
+
+ public static final ConfigOption<Boolean> RAFT_SNAPSHOT_PARALLEL_COMPRESS =
+ new ConfigOption<>(
+ "raft.snapshot_parallel_compress",
+ "Whether to enable parallel compress.",
+ disallowEmpty(),
+ false
+ );
+
+ public static final ConfigOption<Integer> RAFT_SNAPSHOT_COMPRESS_THREADS =
+ new ConfigOption<>(
+ "raft.snapshot_compress_threads",
+ "The thread number used to do snapshot compress.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 4
+ );
+
+ public static final ConfigOption<Integer> RAFT_SNAPSHOT_DECOMPRESS_THREADS =
+ new ConfigOption<>(
+ "raft.snapshot_decompress_threads",
+ "The thread number used to do snapshot decompress.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 4
+ );
+
+ public static final ConfigOption<Integer> RAFT_BACKEND_THREADS =
+ new ConfigOption<>(
+ "raft.backend_threads",
+ "The thread number used to apply task to backend.",
+ rangeInt(0, Integer.MAX_VALUE),
+ CPUS
+ );
+
+ public static final ConfigOption<Integer> RAFT_READ_INDEX_THREADS =
+ new ConfigOption<>(
+ "raft.read_index_threads",
+ "The thread number used to execute reading index.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 8
+ );
+
+ public static final ConfigOption<String> RAFT_READ_STRATEGY =
+ new ConfigOption<>(
+ "raft.read_strategy",
+ "The linearizability of read strategy.",
+ allowValues("ReadOnlyLeaseBased", "ReadOnlySafe"),
+ "ReadOnlyLeaseBased"
+ );
+
+ public static final ConfigOption<Integer> RAFT_APPLY_BATCH =
+ new ConfigOption<>(
+ "raft.apply_batch",
+ "The apply batch size to trigger disruptor event handler.",
+ positiveInt(),
+ // jraft default value is 32
+ 1
+ );
+
+ public static final ConfigOption<Integer> RAFT_QUEUE_SIZE =
+ new ConfigOption<>(
+ "raft.queue_size",
+ "The disruptor buffers size for jraft RaftNode, " +
+ "StateMachine and LogManager.",
+ positiveInt(),
+ // jraft default value is 16384
+ 16384
+ );
+
+ public static final ConfigOption<Integer> RAFT_QUEUE_PUBLISH_TIMEOUT =
+ new ConfigOption<>(
+ "raft.queue_publish_timeout",
+ "The timeout in second when publish event into disruptor.",
+ positiveInt(),
+ // jraft default value is 10(sec)
+ 60
+ );
+
+ public static final ConfigOption<Integer> RAFT_RPC_THREADS =
+ new ConfigOption<>(
+ "raft.rpc_threads",
+ "The rpc threads for jraft RPC layer",
+ positiveInt(),
+ // jraft default value is 80
+ Math.max(CPUS * 2, 80)
+ );
+
+ public static final ConfigOption<Integer> RAFT_RPC_CONNECT_TIMEOUT =
+ new ConfigOption<>(
+ "raft.rpc_connect_timeout",
+ "The rpc connect timeout for jraft rpc.",
+ positiveInt(),
+ // jraft default value is 1000(ms)
+ 5000
+ );
+
+ public static final ConfigOption<Integer> RAFT_RPC_TIMEOUT =
+ new ConfigOption<>(
+ "raft.rpc_timeout",
+ "The general rpc timeout in seconds for jraft rpc.",
+ positiveInt(),
+ // jraft default value is 5s
+ 60
+ );
+
+ public static final ConfigOption<Integer> RAFT_INSTALL_SNAPSHOT_TIMEOUT =
+ new ConfigOption<>(
+ "raft.install_snapshot_rpc_timeout",
+ "The install snapshot rpc timeout in seconds for jraft rpc.",
+ positiveInt(),
+ // jraft default value is 5 minutes
+ 10 * 60 * 60
+ );
+
+ public static final ConfigOption<Integer> RAFT_RPC_BUF_LOW_WATER_MARK =
+ new ConfigOption<>(
+ "raft.rpc_buf_low_water_mark",
+ "The ChannelOutboundBuffer's low water mark of netty, " +
+ "when buffer size less than this size, the method " +
+ "ChannelOutboundBuffer.isWritable() will return true, " +
+ "it means that low downstream pressure or good network.",
+ positiveInt(),
+ 10 * 1024 * 1024
+ );
+
+ public static final ConfigOption<Integer> RAFT_RPC_BUF_HIGH_WATER_MARK =
+ new ConfigOption<>(
+ "raft.rpc_buf_high_water_mark",
+ "The ChannelOutboundBuffer's high water mark of netty, " +
+ "only when buffer size exceed this size, the method " +
+ "ChannelOutboundBuffer.isWritable() will return false, " +
+ "it means that the downstream pressure is too great to " +
+ "process the request or network is very congestion, " +
+ "upstream needs to limit rate at this time.",
+ positiveInt(),
+ 20 * 1024 * 1024
+ );
+
+ public static final ConfigOption<Integer> RATE_LIMIT_WRITE =
+ new ConfigOption<>(
+ "rate_limit.write",
+ "The max rate(items/s) to add/update/delete vertices/edges.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 0
+ );
+
+ public static final ConfigOption<Integer> RATE_LIMIT_READ =
+ new ConfigOption<>(
+ "rate_limit.read",
+ "The max rate(times/s) to execute query of vertices/edges.",
+ rangeInt(0, Integer.MAX_VALUE),
+ 0
+ );
+
+    // Task scheduling options. Restored the stripped generic type parameters
+    // (raw types): Long for 0L/L-suffixed defaults, Integer/String/Boolean
+    // per each default value.
+    public static final ConfigOption<Long> TASK_SCHEDULE_PERIOD =
+            new ConfigOption<>(
+                    "task.schedule_period",
+                    "Period time when scheduler to schedule task",
+                    rangeInt(0L, Long.MAX_VALUE),
+                    10L
+            );
+
+    public static final ConfigOption<Long> TASK_WAIT_TIMEOUT =
+            new ConfigOption<>(
+                    "task.wait_timeout",
+                    "Timeout in seconds for waiting for the task to " +
+                    "complete, such as when truncating or clearing the " +
+                    "backend.",
+                    rangeInt(0L, Long.MAX_VALUE),
+                    10L
+            );
+
+    public static final ConfigOption<Long> TASK_INPUT_SIZE_LIMIT =
+            new ConfigOption<>(
+                    "task.input_size_limit",
+                    "The job input size limit in bytes.",
+                    rangeInt(0L, Bytes.GB),
+                    16 * Bytes.MB
+            );
+
+    public static final ConfigOption<Long> TASK_RESULT_SIZE_LIMIT =
+            new ConfigOption<>(
+                    "task.result_size_limit",
+                    "The job result size limit in bytes.",
+                    rangeInt(0L, Bytes.GB),
+                    16 * Bytes.MB
+            );
+
+    public static final ConfigOption<Integer> TASK_TTL_DELETE_BATCH =
+            new ConfigOption<>(
+                    "task.ttl_delete_batch",
+                    "The batch size used to delete expired data.",
+                    rangeInt(1, 500),
+                    1
+            );
+
+    public static final ConfigOption<String> SCHEDULER_TYPE =
+            new ConfigOption<>(
+                    "task.scheduler_type",
+                    "The type of scheduler used in distribution system.",
+                    allowValues("local", "distributed"),
+                    "local"
+            );
+
+    public static final ConfigOption<Boolean> TASK_SYNC_DELETION =
+            new ConfigOption<>(
+                    "task.sync_deletion",
+                    "Whether to delete schema or expired data synchronously.",
+                    disallowEmpty(),
+                    false
+            );
+
+    public static final ConfigOption<Integer> TASK_RETRY =
+            new ConfigOption<>(
+                    "task.retry",
+                    "Task retry times.",
+                    rangeInt(0, 3),
+                    0
+            );
+
+    // Store/vertex options. Restored the stripped generic type parameters
+    // (raw types); fixed the typo "meas" -> "means" in the part-edge-commit
+    // description; normalized the cast spacing "(int) COMMIT_BATCH".
+    public static final ConfigOption<Long> STORE_CONN_DETECT_INTERVAL =
+            new ConfigOption<>(
+                    "store.connection_detect_interval",
+                    "The interval in seconds for detecting connections, " +
+                    "if the idle time of a connection exceeds this value, " +
+                    "detect it and reconnect if needed before using, " +
+                    "value 0 means detecting every time.",
+                    rangeInt(0L, Long.MAX_VALUE),
+                    600L
+            );
+
+    public static final ConfigOption<String> VERTEX_DEFAULT_LABEL =
+            new ConfigOption<>(
+                    "vertex.default_label",
+                    "The default vertex label.",
+                    disallowEmpty(),
+                    "vertex"
+            );
+
+    public static final ConfigOption<Boolean> VERTEX_CHECK_CUSTOMIZED_ID_EXIST =
+            new ConfigOption<>(
+                    "vertex.check_customized_id_exist",
+                    "Whether to check the vertices exist for those using " +
+                    "customized id strategy.",
+                    disallowEmpty(),
+                    false
+            );
+
+    public static final ConfigOption<Boolean> VERTEX_REMOVE_LEFT_INDEX =
+            new ConfigOption<>(
+                    "vertex.remove_left_index_at_overwrite",
+                    "Whether remove left index at overwrite.",
+                    disallowEmpty(),
+                    false
+            );
+
+    public static final ConfigOption<Boolean> VERTEX_ADJACENT_VERTEX_EXIST =
+            new ConfigOption<>(
+                    "vertex.check_adjacent_vertex_exist",
+                    "Whether to check the adjacent vertices of edges exist.",
+                    disallowEmpty(),
+                    false
+            );
+
+    public static final ConfigOption<Boolean> VERTEX_ADJACENT_VERTEX_LAZY =
+            new ConfigOption<>(
+                    "vertex.lazy_load_adjacent_vertex",
+                    "Whether to lazy load adjacent vertices of edges.",
+                    disallowEmpty(),
+                    true
+            );
+
+    public static final ConfigOption<Integer> VERTEX_PART_EDGE_COMMIT_SIZE =
+            new ConfigOption<>(
+                    "vertex.part_edge_commit_size",
+                    "Whether to enable the mode to commit part of edges of " +
+                    "vertex, enabled if commit size > 0, 0 means disabled.",
+                    rangeInt(0, (int) Query.DEFAULT_CAPACITY),
+                    5000
+            );
+
+    public static final ConfigOption<Boolean> VERTEX_ENCODE_PK_NUMBER =
+            new ConfigOption<>(
+                    "vertex.encode_primary_key_number",
+                    "Whether to encode number value of primary key " +
+                    "in vertex id.",
+                    disallowEmpty(),
+                    true
+            );
+
+    public static final ConfigOption<Integer> VERTEX_TX_CAPACITY =
+            new ConfigOption<>(
+                    "vertex.tx_capacity",
+                    "The max size(items) of vertices(uncommitted) in " +
+                    "transaction.",
+                    rangeInt((int) COMMIT_BATCH, 1000000),
+                    10000
+            );
+
+    // Query options. Restored the stripped generic type parameters
+    // (raw types) from each option's default value.
+    public static final ConfigOption<Boolean> QUERY_IGNORE_INVALID_DATA =
+            new ConfigOption<>(
+                    "query.ignore_invalid_data",
+                    "Whether to ignore invalid data of vertex or edge.",
+                    disallowEmpty(),
+                    true
+            );
+
+    public static final ConfigOption<Boolean> QUERY_OPTIMIZE_AGGR_BY_INDEX =
+            new ConfigOption<>(
+                    "query.optimize_aggregate_by_index",
+                    "Whether to optimize aggregate query(like count) by index.",
+                    disallowEmpty(),
+                    false
+            );
+
+    public static final ConfigOption<Integer> QUERY_BATCH_SIZE =
+            new ConfigOption<>(
+                    "query.batch_size",
+                    "The size of each batch when querying by batch.",
+                    rangeInt(1, (int) Query.DEFAULT_CAPACITY),
+                    1000
+            );
+
+    public static final ConfigOption<Integer> QUERY_PAGE_SIZE =
+            new ConfigOption<>(
+                    "query.page_size",
+                    "The size of each page when querying by paging.",
+                    rangeInt(1, (int) Query.DEFAULT_CAPACITY),
+                    500
+            );
+
+    public static final ConfigOption<Integer> QUERY_INDEX_INTERSECT_THRESHOLD =
+            new ConfigOption<>(
+                    "query.index_intersect_threshold",
+                    "The maximum number of intermediate results to " +
+                    "intersect indexes when querying by multiple single " +
+                    "index properties.",
+                    rangeInt(1, (int) Query.DEFAULT_CAPACITY),
+                    1000
+            );
+
+    public static final ConfigOption<Boolean> QUERY_RAMTABLE_ENABLE =
+            new ConfigOption<>(
+                    "query.ramtable_enable",
+                    "Whether to enable ramtable for query of adjacent edges.",
+                    disallowEmpty(),
+                    false
+            );
+
+    public static final ConfigOption<Long> QUERY_RAMTABLE_VERTICES_CAPACITY =
+            new ConfigOption<>(
+                    "query.ramtable_vertices_capacity",
+                    "The maximum number of vertices in ramtable, " +
+                    "generally the largest vertex id is used as capacity.",
+                    rangeInt(1L, Integer.MAX_VALUE * 2L),
+                    10000000L
+            );
+
+    public static final ConfigOption<Integer> QUERY_RAMTABLE_EDGES_CAPACITY =
+            new ConfigOption<>(
+                    "query.ramtable_edges_capacity",
+                    "The maximum number of edges in ramtable, " +
+                    "include OUT and IN edges.",
+                    rangeInt(1, Integer.MAX_VALUE),
+                    20000000
+            );
+
+    /**
+     * The schema name rule:
+     * 1. Not allowed end with spaces
+     * 2. Not allowed start with '~'
+     */
+    public static final ConfigOption<String> SCHEMA_ILLEGAL_NAME_REGEX =
+            new ConfigOption<>(
+                    "schema.illegal_name_regex",
+                    "The regex specified the illegal format for schema name.",
+                    disallowEmpty(),
+                    ".*\\s+$|~.*"
+            );
+
+    // Cache and snowflake-id options. Restored the stripped generic type
+    // parameters (raw types) from each option's default value.
+    public static final ConfigOption<Long> SCHEMA_CACHE_CAPACITY =
+            new ConfigOption<>(
+                    "schema.cache_capacity",
+                    "The max cache size(items) of schema cache.",
+                    rangeInt(0L, Long.MAX_VALUE),
+                    10000L
+            );
+
+    public static final ConfigOption<String> VERTEX_CACHE_TYPE =
+            new ConfigOption<>(
+                    "vertex.cache_type",
+                    "The type of vertex cache, allowed values are [l1, l2].",
+                    allowValues("l1", "l2"),
+                    "l2"
+            );
+
+    public static final ConfigOption<Long> VERTEX_CACHE_CAPACITY =
+            new ConfigOption<>(
+                    "vertex.cache_capacity",
+                    "The max cache size(items) of vertex cache.",
+                    rangeInt(0L, Long.MAX_VALUE),
+                    (1000 * 1000 * 10L)
+            );
+
+    public static final ConfigOption<Integer> VERTEX_CACHE_EXPIRE =
+            new ConfigOption<>(
+                    "vertex.cache_expire",
+                    "The expiration time in seconds of vertex cache.",
+                    rangeInt(0, Integer.MAX_VALUE),
+                    (60 * 10)
+            );
+
+    public static final ConfigOption<String> EDGE_CACHE_TYPE =
+            new ConfigOption<>(
+                    "edge.cache_type",
+                    "The type of edge cache, allowed values are [l1, l2].",
+                    allowValues("l1", "l2"),
+                    "l2"
+            );
+
+    public static final ConfigOption<Long> EDGE_CACHE_CAPACITY =
+            new ConfigOption<>(
+                    "edge.cache_capacity",
+                    "The max cache size(items) of edge cache.",
+                    rangeInt(0L, Long.MAX_VALUE),
+                    (1000 * 1000 * 1L)
+            );
+
+    public static final ConfigOption<Integer> EDGE_CACHE_EXPIRE =
+            new ConfigOption<>(
+                    "edge.cache_expire",
+                    "The expiration time in seconds of edge cache.",
+                    rangeInt(0, Integer.MAX_VALUE),
+                    (60 * 10)
+            );
+
+    public static final ConfigOption<Long> SNOWFLAKE_WORKER_ID =
+            new ConfigOption<>(
+                    "snowflake.worker_id",
+                    "The worker id of snowflake id generator.",
+                    disallowEmpty(),
+                    0L
+            );
+
+    public static final ConfigOption<Long> SNOWFLAKE_DATACENTER_ID =
+            new ConfigOption<>(
+                    "snowflake.datacenter_id",
+                    "The datacenter id of snowflake id generator.",
+                    disallowEmpty(),
+                    0L
+            );
+
+    public static final ConfigOption<Boolean> SNOWFLAKE_FORCE_STRING =
+            new ConfigOption<>(
+                    "snowflake.force_string",
+                    "Whether to force the snowflake long id to be a string.",
+                    disallowEmpty(),
+                    false
+            );
+
+    // Text analyzer / computer / oltp / pd options. Restored the stripped
+    // generic type parameters (raw types); the ConvOption converts the
+    // configured String into a CollectionType via CollectionType::valueOf.
+    public static final ConfigOption<String> TEXT_ANALYZER =
+            new ConfigOption<>(
+                    "search.text_analyzer",
+                    "Choose a text analyzer for searching the " +
+                    "vertex/edge properties, available type are " +
+                    "[ansj, hanlp, smartcn, jieba, jcseg, " +
+                    "mmseg4j, ikanalyzer].",
+                    disallowEmpty(),
+                    "ikanalyzer"
+            );
+
+    public static final ConfigOption<String> TEXT_ANALYZER_MODE =
+            new ConfigOption<>(
+                    "search.text_analyzer_mode",
+                    "Specify the mode for the text analyzer, " +
+                    "the available mode of analyzer are " +
+                    "ansj: [BaseAnalysis, IndexAnalysis, ToAnalysis, " +
+                    "NlpAnalysis], " +
+                    "hanlp: [standard, nlp, index, nShort, shortest, speed], " +
+                    "smartcn: [], " +
+                    "jieba: [SEARCH, INDEX], " +
+                    "jcseg: [Simple, Complex], " +
+                    "mmseg4j: [Simple, Complex, MaxWord], " +
+                    "ikanalyzer: [smart, max_word]" +
+                    "}.",
+                    disallowEmpty(),
+                    "smart"
+            );
+
+    public static final ConfigOption<String> COMPUTER_CONFIG =
+            new ConfigOption<>(
+                    "computer.config",
+                    "The config file path of computer job.",
+                    disallowEmpty(),
+                    "./conf/computer.yaml"
+            );
+
+    public static final ConfigOption<Integer> OLTP_CONCURRENT_THREADS =
+            new ConfigOption<>(
+                    "oltp.concurrent_threads",
+                    "Thread number to concurrently execute oltp algorithm.",
+                    rangeInt(0, 65535),
+                    10
+            );
+
+    public static final ConfigOption<Integer> OLTP_CONCURRENT_DEPTH =
+            new ConfigOption<>(
+                    "oltp.concurrent_depth",
+                    "The min depth to enable concurrent oltp algorithm.",
+                    rangeInt(0, 65535),
+                    10
+            );
+
+    public static final ConfigConvOption<String, CollectionType> OLTP_COLLECTION_TYPE =
+            new ConfigConvOption<>(
+                    "oltp.collection_type",
+                    "The implementation type of collections " +
+                    "used in oltp algorithm.",
+                    allowValues("JCF", "EC", "FU"),
+                    CollectionType::valueOf,
+                    "EC"
+            );
+
+    public static final ConfigOption<String> PD_PEERS = new ConfigOption<>(
+            "pd.peers",
+            "The addresses of pd nodes, separated with commas.",
+            disallowEmpty(),
+            "127.0.0.1:8686"
+    );
+
+    // Memory-management and graph-space options. Restored the stripped
+    // generic type parameters (raw types) from each option's default value.
+    public static final ConfigOption<String> MEMORY_MODE = new ConfigOption<>(
+            "memory.mode",
+            "The memory mode used for query in HugeGraph.",
+            disallowEmpty(),
+            "off-heap"
+    );
+
+    public static final ConfigOption<Long> MAX_MEMORY_CAPACITY = new ConfigOption<>(
+            "memory.max_capacity",
+            "The maximum memory capacity that can be managed for all queries in HugeGraph.",
+            nonNegativeInt(),
+            Bytes.GB
+    );
+
+    public static final ConfigOption<Long> ONE_QUERY_MAX_MEMORY_CAPACITY = new ConfigOption<>(
+            "memory.one_query_max_capacity",
+            "The maximum memory capacity that can be managed for a query in HugeGraph.",
+            nonNegativeInt(),
+            Bytes.MB * 100
+    );
+
+    public static final ConfigOption<Long> MEMORY_ALIGNMENT = new ConfigOption<>(
+            "memory.alignment",
+            "The alignment used for round memory size.",
+            nonNegativeInt(),
+            8L
+    );
+
+    public static final ConfigOption<String> GRAPH_SPACE =
+            new ConfigOption<>(
+                    "graphspace",
+                    "The graph space name.",
+                    // null verifier: any non-null value is accepted as-is
+                    null,
+                    "DEFAULT"
+            );
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/query/Aggregate.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/query/Aggregate.java
new file mode 100644
index 0000000000..38f1365f67
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/query/Aggregate.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.query;
+
+import java.util.Iterator;
+
+/**
+ * An aggregate operation: a reduce function paired with the column it
+ * applies to. A null column together with a count function represents
+ * count-all, rendered as "*" by {@link #toString()}.
+ *
+ * The type parameter {@code <P>} (the aggregated value type) was missing
+ * and has been restored: {@code func}, {@code reduce} and
+ * {@code defaultValue} all operate on {@code P}.
+ */
+@Deprecated
+public class Aggregate<P> {
+
+    private final AggregateFuncDefine<P> func;
+    private final String column;
+
+    public Aggregate(AggregateFuncDefine<P> func, String column) {
+        this.func = func;
+        this.column = column;
+    }
+
+    public AggregateFuncDefine<P> func() {
+        return this.func;
+    }
+
+    public String column() {
+        return this.column;
+    }
+
+    /** True when this represents count(*): a count function with no column. */
+    public boolean countAll() {
+        return this.func.countAll() && this.column == null;
+    }
+
+    /** Folds the iterated values into a single aggregated result. */
+    public P reduce(Iterator<P> results) {
+        return this.func.reduce(results);
+    }
+
+    /** The function-defined default value (used when there is no input). */
+    public P defaultValue() {
+        return this.func.defaultValue();
+    }
+
+    @Override
+    public String toString() {
+        return String.format("%s(%s)", this.func.string(),
+                             this.column == null ? "*" : this.column);
+    }
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/query/AggregateFuncDefine.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/query/AggregateFuncDefine.java
new file mode 100644
index 0000000000..2ef23df42a
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/query/AggregateFuncDefine.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 HugeGraph Authors
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hugegraph.query;
+
+import java.util.Iterator;
+
+/**
+ * Definition of an aggregate function: how values are reduced to one
+ * result. (Original Javadoc was in Chinese: "聚合方式定义".)
+ *
+ * @param <P> the type of the aggregated value
+ */
+public interface AggregateFuncDefine<P> {
+
+    /** The display name of the function, e.g. used in Aggregate#toString. */
+    String string();
+
+    /** The value to return when there is nothing to aggregate. */
+    P defaultValue();
+
+    /** Reduces all iterated values into a single result. */
+    P reduce(Iterator<P> results);
+
+    /** Whether this function counts rows (supports count-all semantics). */
+    boolean countAll();
+}
diff --git a/hugegraph-struct/src/main/java/org/apache/hugegraph/query/Condition.java b/hugegraph-struct/src/main/java/org/apache/hugegraph/query/Condition.java
new file mode 100644
index 0000000000..52c1a03ee8
--- /dev/null
+++ b/hugegraph-struct/src/main/java/org/apache/hugegraph/query/Condition.java
@@ -0,0 +1,1045 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.query;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.BiFunction;
+import java.util.function.BiPredicate;
+import java.util.regex.Pattern;
+
+import org.apache.commons.lang.ArrayUtils;
+import org.apache.commons.text.similarity.LevenshteinDistance;
+import org.apache.hugegraph.id.Id;
+import org.apache.hugegraph.backend.Shard;
+import org.apache.hugegraph.structure.BaseElement;
+import org.apache.hugegraph.structure.BaseProperty;
+import org.apache.hugegraph.type.define.HugeKeys;
+import org.apache.hugegraph.util.Bytes;
+import org.apache.hugegraph.util.DateUtil;
+import org.apache.hugegraph.util.E;
+import org.apache.hugegraph.util.NumericUtil;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+
+public abstract class Condition {
+
+ public enum ConditionType {
+ NONE,
+ RELATION,
+ AND,
+ OR,
+ NOT
+ }
+
+ public enum RelationType implements BiPredicate