diff --git a/conf/hugegraph.license b/conf/hugegraph.license
new file mode 100644
index 0000000000..3cc0c344b4
Binary files /dev/null and b/conf/hugegraph.license differ
diff --git a/hg-pd-cli/pom.xml b/hg-pd-cli/pom.xml
new file mode 100644
index 0000000000..3a776fab02
--- /dev/null
+++ b/hg-pd-cli/pom.xml
@@ -0,0 +1,120 @@
+
+
+
+ hugegraph-pd
+ org.apache.hugegraph
+ ${revision}
+
+ 4.0.0
+
+ hg-pd-cli
+
+
+ 2.12.1
+
+
+
+ org.apache.hugegraph
+ hg-pd-client
+ ${project.version}
+
+
+ junit
+ junit
+ ${junit.version}
+ test
+
+
+ com.alipay.sofa
+ jraft-core
+ ${jraft-core.version}
+
+
+ org.rocksdb
+ rocksdbjni
+
+
+ com.google.protobuf
+ protobuf-java
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.yaml
+ snakeyaml
+ test
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+
+
+ package
+
+ single
+
+
+
+
+
+ org.apache.hugegraph.pd.cli.CliApplication
+
+
+
+
+ jar-with-dependencies
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java
new file mode 100644
index 0000000000..33b0c50be3
--- /dev/null
+++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java
@@ -0,0 +1,40 @@
+package org.apache.hugegraph.pd.cli;
+
+import org.apache.hugegraph.pd.cli.cmd.ChangeRaft;
+import org.apache.hugegraph.pd.cli.cmd.Command;
+import org.apache.hugegraph.pd.cli.cmd.Config;
+import org.apache.hugegraph.pd.cli.cmd.Parameter;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class CliApplication {
+
+ public static void main(String[] args) {
+ try {
+ Parameter parameter = Command.toParameter(args);
+ Command command;
+ switch (parameter.getCmd()) {
+ case "config":
+ command = new Config(parameter.getPd());
+ break;
+ case "change_raft":
+ command = new ChangeRaft(parameter.getPd());
+ break;
+// case "check_peers":
+// command = new CheckPeers(parameter.getPd());
+// break;
+ default:
+ log.error("无效的指令");
+ return;
+ }
+ command.action(parameter.getParams());
+ } catch (Exception e) {
+ log.error("main thread error:", e);
+ System.exit(0);
+ } finally {
+
+ }
+
+ }
+}
diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java
new file mode 100644
index 0000000000..6c37f76594
--- /dev/null
+++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java
@@ -0,0 +1,19 @@
+package org.apache.hugegraph.pd.cli.cmd;
+
+import org.apache.hugegraph.pd.common.PDException;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/10/17
+ **/
+public class ChangeRaft extends Command {
+
+ public ChangeRaft(String pd) {
+ super(pd);
+ }
+
+ @Override
+ public void action(String[] params) throws PDException {
+ pdClient.updatePdRaft(params[0]);
+ }
+}
diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java
new file mode 100644
index 0000000000..a04fb3c00f
--- /dev/null
+++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java
@@ -0,0 +1,54 @@
+package org.apache.hugegraph.pd.cli.cmd;
+
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/10/17
+ **/
+public abstract class Command {
+
+ protected static String error = "启动参数: 命令, pd地址, 命令参数, 参数分隔符(非必须)";
+ protected PDClient pdClient;
+ protected PDConfig config;
+
+ public Command(String pd) {
+ config = PDConfig.of(pd).setAuthority("store", "");
+ pdClient = PDClient.create(config);
+ }
+
+ public static Parameter toParameter(String[] args) throws PDException {
+ if (args.length < 2) {
+ throw new PDException(-1, error);
+ }
+ Parameter parameter = new Parameter();
+ parameter.setCmd(args[0]);
+ parameter.setPd(args[1]);
+
+ if (args.length == 2) {
+ parameter.setParams(new String[0]);
+ return parameter;
+ }
+
+ if (args.length == 4) {
+ // 之前的逻辑,存在一个分隔符,做兼容
+ String t = args[3];
+ if (t != null && !t.isEmpty() && args[2].contains(t)) {
+ parameter.setParams(args[2].split(t));
+ parameter.setSeparator(t);
+ return parameter;
+ }
+ }
+
+ // 剩余的部分放到 params中
+ String[] params = new String[args.length - 2] ;
+ System.arraycopy(args, 2, params, 0, args.length - 2);
+ parameter.setParams(params);
+
+ return parameter;
+ }
+
+ public abstract void action(String[] params) throws Exception;
+}
diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java
new file mode 100644
index 0000000000..0e75ec9a07
--- /dev/null
+++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java
@@ -0,0 +1,49 @@
+package org.apache.hugegraph.pd.cli.cmd;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/10/17
+ **/
+public class Config extends Command {
+
+ public Config(String pd) {
+ super(pd);
+ }
+
+ @Override
+ public void action(String[] params) throws PDException {
+ String param = params[0];
+ String[] pair = param.split("=");
+ String key = pair[0].trim();
+ Object value = null;
+ if (pair.length > 1) {
+ value = pair[1].trim();
+ }
+ if (value == null) {
+ Metapb.PDConfig pdConfig = pdClient.getPDConfig();
+ switch (key) {
+ case "enableBatchLoad":
+ // value = pdConfig.getEnableBatchLoad();
+ break;
+ case "shardCount":
+ value = pdConfig.getShardCount();
+ break;
+ }
+
+ System.out.println("Get config " + key + "=" + value);
+ } else {
+ Metapb.PDConfig.Builder builder = Metapb.PDConfig.newBuilder();
+ switch (key) {
+ case "enableBatchLoad":
+ // builder.setEnableBatchLoad(Boolean.valueOf((String)value));
+ case "shardCount":
+ builder.setShardCount(Integer.valueOf((String) value));
+ }
+ pdClient.setPDConfig(builder.build());
+ System.out.println("Set config " + key + "=" + value);
+ }
+ }
+}
diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java
new file mode 100644
index 0000000000..acbeca2152
--- /dev/null
+++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java
@@ -0,0 +1,15 @@
+package org.apache.hugegraph.pd.cli.cmd;
+
+import lombok.Data;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/10/20
+ **/
+@Data
+public class Parameter {
+ String cmd;
+ String pd;
+ String[] params;
+ String separator;
+}
diff --git a/hg-pd-cli/src/main/resources/log4j2.xml b/hg-pd-cli/src/main/resources/log4j2.xml
new file mode 100644
index 0000000000..9a045c7500
--- /dev/null
+++ b/hg-pd-cli/src/main/resources/log4j2.xml
@@ -0,0 +1,122 @@
+
+
+
+
+
+ logs
+ hugegraph-pd
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java b/hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java
new file mode 100644
index 0000000000..f7679c76ce
--- /dev/null
+++ b/hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java
@@ -0,0 +1,61 @@
+package org.apache.hugegraph.pd.cli;
+
+import org.apache.hugegraph.pd.common.PDException;
+// import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+public class CliTest {
+ // @Test
+ public void getConfig() throws PDException {
+ CliApplication.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"});
+ }
+ // @Test
+ public void setBatchTrue() throws PDException {
+ CliApplication.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "});
+ }
+
+ // @Test
+ public void setBatchFalse() throws PDException {
+ CliApplication.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad=false"});
+ }
+
+ // @Test
+ public void getConfig2() throws PDException {
+ CliApplication.main(new String[]{"127.0.0.1:8686", "config", "shardCount"});
+ }
+ // @Test
+ public void setShardCount1() throws PDException {
+ CliApplication.main(new String[]{"127.0.0.1:8686", "config", "shardCount=1"});
+ }
+
+ // @Test
+ public void setShardCount3() throws PDException {
+ CliApplication.main(new String[]{"127.0.0.1:8686", "config", "shardCount=3"});
+ }
+
+ // @Test
+ public void test2(){
+ Integer[] a = new Integer[] { 1, 0, 3, 2};
+ List aa = Arrays.asList(a);
+ System.out.printf(test2sup(aa, aa.size(),0)?"TRUE":"FALSE");
+ }
+ public static boolean test2sup (List arrays, int tail, int res) {
+ System.out.println(String.format("%d %d", tail, res));
+ if (tail == 0) {
+ System.out.println(String.format("a = %d %d", tail, res));
+ return false;
+ } else if(tail == 1) {
+ System.out.println(String.format("b = %d %d", arrays.get(0), res));
+ return (arrays.get(0) == res);
+ } else if(tail == 2) {
+ System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res));
+ return (arrays.get(0) + arrays.get(1) == Math.abs(res)) ||
+ (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res));
+ } else {
+ return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) ||
+ test2sup(arrays, tail - 1, res - arrays.get(tail - 1));
+ }
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java
new file mode 100644
index 0000000000..c75b1755d1
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java
@@ -0,0 +1,96 @@
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import org.apache.hugegraph.pd.client.listener.LeaderChangeListener;
+import org.apache.hugegraph.pd.client.rpc.AnyInvoker;
+import org.apache.hugegraph.pd.client.rpc.ConnectionManager;
+import org.apache.hugegraph.pd.client.rpc.ConnectionManagers;
+import org.apache.hugegraph.pd.client.rpc.Invoker;
+import org.apache.hugegraph.pd.client.rpc.LeaderInvoker;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+import org.apache.hugegraph.pd.grpc.common.Errors;
+import org.apache.hugegraph.pd.grpc.common.RequestHeader;
+import org.apache.hugegraph.pd.grpc.common.ResponseHeader;
+
+import io.grpc.Channel;
+import io.grpc.MethodDescriptor;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author lynn.bond@hotmail.com on 2023/12/20
+ */
+@Slf4j
+public abstract class BaseClient implements Closeable, LeaderChangeListener {
+
+ public static final ResponseHeader OK_HEADER =
+ ResponseHeader.newBuilder().setError(Errors.newBuilder().setType(ErrorType.OK)).build();
+ protected final RequestHeader header = RequestHeader.getDefaultInstance();
+ @Getter
+ private final PDConfig config;
+ @Getter
+ private final ConnectionManager cm;
+ @Getter
+ private final Invoker leaderInvoker;
+ @Getter
+ private final Invoker anyInvoker;
+ private final Function asCreator;
+ private final Function bsCreator;
+
+ protected BaseClient(PDConfig pdConfig, Function asCreator,
+ Function bsCreator) {
+ this.config = pdConfig;
+ this.cm = ConnectionManagers.getInstance().add(pdConfig);
+ this.cm.addClient(this);
+ this.asCreator = asCreator;
+ this.bsCreator = bsCreator;
+ this.leaderInvoker = new LeaderInvoker(this.cm, asCreator, bsCreator);
+ this.anyInvoker = new AnyInvoker(this.cm, asCreator, bsCreator);
+ }
+
+ public ResponseHeader createErrorHeader(int errorCode, String errorMsg) {
+ return ResponseHeader.newBuilder()
+ .setError(Errors.newBuilder().setTypeValue(errorCode).setMessage(errorMsg))
+ .build();
+ }
+
+ public void handleErrors(ResponseHeader header) throws PDException {
+ Errors error = header.getError();
+ if (header.hasError() && error.getType() != ErrorType.OK) {
+ throw new PDException(error.getTypeValue(),
+ String.format("PD request error, error code = %d, msg = %s",
+ Integer.valueOf(error.getTypeValue()), error.getMessage()));
+ }
+ }
+
+ public String getLeaderAddress() {
+ return this.cm.getLeader();
+ }
+
+ protected RespT blockingUnaryCall(MethodDescriptor method, ReqT req) throws
+ PDException {
+ return this.leaderInvoker.blockingCall(method, req);
+ }
+
+ protected RespT blockingUnaryCall(MethodDescriptor method, ReqT req,
+ long timeout) throws PDException {
+ return this.leaderInvoker.blockingCall(method, req, timeout);
+ }
+
+ protected KVPair concurrentBlockingUnaryCall(
+ MethodDescriptor method, ReqT req, Predicate predicate) throws PDException {
+ RespT t = this.anyInvoker.blockingCall(method, req, predicate);
+ return new KVPair(Boolean.valueOf((t != null)), t);
+ }
+
+ public void close() {
+ this.cm.removeClient(this);
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
new file mode 100644
index 0000000000..7c2c769326
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
@@ -0,0 +1,402 @@
+package org.apache.hugegraph.pd.client;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.pd.client.impl.PDApi;
+import lombok.Setter;
+import org.apache.commons.collections4.CollectionUtils;
+
+import org.apache.hugegraph.pd.client.rpc.ConnectionClient;
+import org.apache.hugegraph.pd.common.GraphCache;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchShardGroupResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+import org.apache.hugegraph.pd.watch.Watcher;
+import com.google.common.collect.RangeMap;
+
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_OFFLINE;
+
+@Slf4j
+public class ClientCache {
+
+ private final Watcher watcher;
+ private volatile Map> groups;
+ private volatile Map stores;
+ private volatile Map caches = new ConcurrentHashMap<>();
+ private AtomicBoolean initialized = new AtomicBoolean(false);
+ @Getter
+ private ConnectionClient client;
+
+ @Setter
+ private PDApi pdApi;
+
+ public ClientCache(ConnectionClient client, Watcher watcher) {
+ this.groups = new ConcurrentHashMap<>();
+ this.stores = new ConcurrentHashMap<>();
+ this.client = client;
+ this.watcher = watcher;
+ this.watcher.watchPartition(this::watchPartition);
+ this.watcher.watchShardGroup(this::watchShardGroup);
+ this.watcher.watchNode(this::watchNode);
+ }
+
+ private void watchPartition(PartitionEvent response) {
+ invalidPartitionCache(response.getGraph(), response.getPartitionId());
+ if (response.getChangeType() == PartitionEvent.ChangeType.DEL) {
+ removeAll(response.getGraph());
+ }
+ }
+
+ private void watchShardGroup(WatchResponse response) {
+ WatchShardGroupResponse shardResponse = response.getShardGroupResponse();
+ switch (shardResponse.getType()) {
+ case WATCH_CHANGE_TYPE_DEL:
+ deleteShardGroup(shardResponse.getShardGroupId());
+ break;
+ case WATCH_CHANGE_TYPE_ALTER:
+ case WATCH_CHANGE_TYPE_ADD:
+ updateShardGroup(response.getShardGroupResponse().getShardGroup());
+ break;
+ }
+ }
+
+ private void watchNode(NodeEvent response) {
+ if (response.getEventType() == NODE_OFFLINE) {
+ invalidStoreCache(response.getNodeId());
+ } else {
+ // update store, 不更新缓存,会造成 getLeaderStoreAddresses的返回结果
+ try {
+ pdApi.getStore(response.getNodeId());
+ } catch (PDException e) {
+ log.error("getStore exception", e);
+ }
+ }
+ }
+
+ private void invalidStoreCache(long storeId) {
+ removeStore(Long.valueOf(storeId));
+ }
+
+ private void invalidPartitionCache(String graphName, int partitionId) {
+ if (null != getPartitionById(graphName, partitionId)) {
+ removePartition(graphName, partitionId);
+ }
+ }
+
+ private GraphCache getGraphCache(String graphName) {
+ GraphCache graph;
+ if ((graph = this.caches.get(graphName)) == null) {
+ synchronized (this.caches) {
+ if ((graph = this.caches.get(graphName)) == null) {
+ Metapb.Graph.Builder builder = Metapb.Graph.newBuilder().setGraphName(graphName);
+ Metapb.Graph g = builder.build();
+ graph = new GraphCache(g);
+ this.caches.put(graphName, graph);
+ }
+ }
+ }
+ return graph;
+ }
+
+ public KVPair getPartitionById(String graphName, int partId) {
+ try {
+ GraphCache graph = initGraph(graphName);
+ Metapb.Partition partition = graph.getPartition(partId);
+ if (partition == null || !this.groups.containsKey(partId)) {
+ return null;
+ }
+ Metapb.Shard shard = this.groups.get(Integer.valueOf(partId)).getValue();
+ if (shard == null) {
+ return null;
+ }
+ return new KVPair(partition, shard);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private KVPair getPair(int partId, GraphCache graph) {
+ Metapb.Partition p = graph.getPartition(Integer.valueOf(partId));
+ KVPair pair = this.groups.get(Integer.valueOf(partId));
+ if (p != null && pair != null) {
+ Metapb.Shard s = pair.getValue();
+ if (s == null) {
+ pair.setValue(getLeader(partId));
+ return new KVPair(p, pair.getValue());
+ }
+ return new KVPair(p, s);
+ }
+ return null;
+ }
+
+ public KVPair getPartitionByCode(String graphName, long code) {
+ try {
+ GraphCache graph = initGraph(graphName);
+ RangeMap range = graph.getRange();
+ Integer pId = range.get(Long.valueOf(code));
+ if (pId != null) {
+ return getPair(pId.intValue(), graph);
+ }
+ ReentrantReadWriteLock.ReadLock readLock = graph.getLock().readLock();
+ try {
+ readLock.lock();
+ pId = range.get(Long.valueOf(code));
+ } catch (Exception e) {
+ log.info("get range with error:", e);
+ } finally {
+ readLock.unlock();
+ }
+ if (pId == null) {
+ ReentrantReadWriteLock.WriteLock writeLock = graph.getLock().writeLock();
+ try {
+ writeLock.lock();
+ if ((pId = range.get(Long.valueOf(code))) == null) {
+ graph.reset();
+ initGraph(graph);
+ pId = range.get(Long.valueOf(code));
+ }
+ } catch (Exception e) {
+ log.info("reset with error:", e);
+ } finally {
+ writeLock.unlock();
+ }
+ }
+ if (pId != null) {
+ return getPair(pId.intValue(), graph);
+ }
+ return null;
+ } catch (PDException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private GraphCache initGraph(String graphName) throws PDException {
+ initCache();
+ GraphCache graph = getGraphCache(graphName);
+ if (!graph.getInitialized().get()) {
+ synchronized (graph) {
+ if (!graph.getInitialized().get()) {
+ initGraph(graph);
+ graph.getInitialized().set(true);
+ }
+ }
+ }
+ return graph;
+ }
+
+ private void initGraph(GraphCache graph) throws PDException {
+ Pdpb.CachePartitionResponse pc = this.client.getPartitionCache(graph.getGraph().getGraphName());
+ List ps = pc.getPartitionsList();
+ if (!CollectionUtils.isEmpty(ps)) {
+ graph.init(ps);
+ }
+ }
+
+ private void initCache() throws PDException {
+ if (!this.initialized.get()) {
+ synchronized (this) {
+ if (!this.initialized.get()) {
+ Pdpb.CacheResponse cache = this.client.getClientCache();
+ List shardGroups = cache.getShardsList();
+ for (Metapb.ShardGroup s : shardGroups) {
+ this.groups.put(Integer.valueOf(s.getId()), new KVPair(s, getLeader(s)));
+ }
+ List stores = cache.getStoresList();
+ for (Metapb.Store store : stores) {
+ this.stores.put(Long.valueOf(store.getId()), store);
+ }
+ List graphs = cache.getGraphsList();
+ for (Metapb.Graph g : graphs) {
+ GraphCache c = new GraphCache(g);
+ this.caches.put(g.getGraphName(), c);
+ }
+ this.initialized.set(true);
+ }
+ }
+ }
+ }
+
+ public KVPair getPartitionByKey(String graphName, byte[] key) {
+ int code = PartitionUtils.calcHashcode(key);
+ return getPartitionByCode(graphName, code);
+ }
+
+ public boolean update(String graphName, int partId, Metapb.Partition partition) {
+ GraphCache graph = getGraphCache(graphName);
+ return graph.updatePartition(partition);
+ }
+
+ public void removePartition(String graphName, int partId) {
+ GraphCache graph = getGraphCache(graphName);
+ graph.removePartition(Integer.valueOf(partId));
+ }
+
+ public void removePartitions() {
+ try {
+ this.groups.clear();
+ this.stores.clear();
+ this.caches.clear();
+ this.initialized.set(false);
+ initCache();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void removePartitions(GraphCache graph) {
+ try {
+ graph.removePartitions();
+ initGraph(graph.getGraph().getGraphName());
+ } catch (Exception e) {
+ log.warn("remove partitions with error:", e);
+ } finally {
+ }
+ }
+
+ public void removeAll(String graphName) {
+ GraphCache graph = this.caches.get(graphName);
+ if (graph != null) {
+ removePartitions(graph);
+ }
+ }
+
+ private StringBuffer getStack(StackTraceElement[] stackTrace) {
+ StringBuffer sb = new StringBuffer();
+ for (int i = 0; i < stackTrace.length; i++) {
+ StackTraceElement element = stackTrace[i];
+ sb.append(element.toString() + "\n");
+ }
+ return sb;
+ }
+
+ public boolean updateShardGroup(Metapb.ShardGroup shardGroup) {
+ KVPair old = this.groups.get(Integer.valueOf(shardGroup.getId()));
+ Metapb.Shard leader = getLeader(shardGroup);
+ if (old != null) {
+ old.setKey(shardGroup);
+ old.setValue(leader);
+ return false;
+ }
+ this.groups.put(Integer.valueOf(shardGroup.getId()), new KVPair(shardGroup, leader));
+ return true;
+ }
+
+ public void deleteShardGroup(int shardGroupId) {
+ this.groups.remove(Integer.valueOf(shardGroupId));
+ }
+
+ public Metapb.ShardGroup getShardGroup(int groupId) {
+ KVPair pair = this.groups.get(Integer.valueOf(groupId));
+ if (pair != null) {
+ return pair.getKey();
+ }
+ return null;
+ }
+
+ public boolean addStore(Long storeId, Metapb.Store store) {
+ Metapb.Store oldStore = this.stores.get(storeId);
+ if (oldStore != null && oldStore.equals(store)) {
+ return false;
+ }
+ this.stores.put(storeId, store);
+ return true;
+ }
+
+ public Metapb.Store getStoreById(Long storeId) {
+ return this.stores.get(storeId);
+ }
+
+ public void removeStore(Long storeId) {
+ this.stores.remove(storeId);
+ }
+
+ public void reset() {
+ this.groups = new ConcurrentHashMap<>();
+ this.stores = new ConcurrentHashMap<>();
+ this.caches = new ConcurrentHashMap<>();
+ this.initialized.set(false);
+ }
+
+ public Metapb.Shard getLeader(int partitionId) {
+ KVPair pair = this.groups.get(Integer.valueOf(partitionId));
+ if (pair != null) {
+ if (pair.getValue() != null) {
+ return pair.getValue();
+ }
+ for (Metapb.Shard shard : pair.getKey().getShardsList()) {
+ if (shard.getRole() == Metapb.ShardRole.Leader) {
+ pair.setValue(shard);
+ return shard;
+ }
+ }
+ }
+ return null;
+ }
+
+ public Metapb.Shard getLeader(Metapb.ShardGroup shardGroup) {
+ if (shardGroup != null) {
+ for (Metapb.Shard shard : shardGroup.getShardsList()) {
+ if (shard.getRole() == Metapb.ShardRole.Leader) {
+ return shard;
+ }
+ }
+ }
+ return null;
+ }
+
+ public void updateLeader(int partitionId, Metapb.Shard leader) {
+ KVPair pair = this.groups.get(partitionId);
+ if (pair != null && leader != null) {
+ Metapb.Shard l = pair.getValue();
+ if (l == null || leader.getStoreId() != l.getStoreId()) {
+ Metapb.ShardGroup shardGroup = pair.getKey();
+ synchronized (shardGroup) {
+ l = pair.getValue();
+ if (l == null || leader.getStoreId() != l.getStoreId()) {
+ log.info("Change leader of partition {} from {} to {}", partitionId, l.getStoreId(),
+ leader.getStoreId());
+ Metapb.ShardGroup.Builder builder =
+ Metapb.ShardGroup.newBuilder(shardGroup).clearShards();
+ for (Metapb.Shard shard : shardGroup.getShardsList()) {
+ builder.addShards(
+ Metapb.Shard.newBuilder()
+ .setStoreId(shard.getStoreId())
+ .setRole((shard.getStoreId() == leader.getStoreId()) ?
+ Metapb.ShardRole.Leader : Metapb.ShardRole.Follower)
+ .build());
+ }
+ pair.setKey(builder.build());
+ pair.setValue(leader);
+ }
+ }
+ }
+ }
+ }
+
+ public List getLeaderStoreAddresses() throws PDException {
+ initCache();
+ Set storeIds =
+ this.groups.values().stream().map(shardGroupShardKVPair -> shardGroupShardKVPair.getValue()
+ .getStoreId())
+ .collect(Collectors.toSet());
+ return this.stores.values().stream()
+ .filter(store -> storeIds.contains(Long.valueOf(store.getId())))
+ .map(Metapb.Store::getAddress)
+ .collect(Collectors.toList());
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
new file mode 100644
index 0000000000..0f1ca08a2b
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
@@ -0,0 +1,18 @@
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+
+import java.util.Map;
+
+/**
+ * @author zhangyingjie
+ * @date 2021/12/20
+ **/
+public interface Discoverable {
+
+ NodeInfos getNodeInfos(Query query);
+
+ void scheduleTask();
+ void cancelTask();
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
new file mode 100644
index 0000000000..9d84a8fcc9
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
@@ -0,0 +1,121 @@
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author zhangyingjie
+ * @date 2021/12/20
+ **/
+@Slf4j
+public abstract class DiscoveryClient extends BaseClient implements Closeable, Discoverable {
+
+ protected int period; //心跳周期
+ ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+ private Timer timer = new Timer("hg-pd-c-serverHeartbeat", true);
+ private long registerTimeout = 30000;
+ private long lockTimeout = 5;
+ private TimerTask task = new TimerTask() {
+ @Override
+ public void run() {
+ boolean locked = false;
+ try {
+ locked = readWriteLock.readLock().tryLock(lockTimeout, TimeUnit.SECONDS);
+ if (locked) {
+ NodeInfo nodeInfo = getRegisterNode();
+ RegisterInfo register;
+ register = getLeaderInvoker().blockingCall(DiscoveryServiceGrpc.getRegisterMethod(),
+ nodeInfo, registerTimeout);
+ Consumer consumer = getRegisterConsumer();
+ if (consumer != null) {
+ try {
+ consumer.accept(register);
+ } catch (Exception e) {
+ log.warn("run consumer when heartbeat with error:", e);
+ }
+ }
+ }
+ } catch (Exception e) {
+ log.error("register with error:", e);
+ } finally {
+ if (locked) {
+ readWriteLock.readLock().unlock();
+ }
+ }
+ }
+ };
+
+ public DiscoveryClient(int delay, PDConfig conf) {
+ super(conf, DiscoveryServiceGrpc::newStub, DiscoveryServiceGrpc::newBlockingStub);
+ this.period = delay;
+ if (this.period > 60000) {
+ this.registerTimeout = this.period / 2;
+ }
+ }
+
+
+ /***
+ * 获取注册节点信息
+ * @param query
+ * @return
+ */
+ @Override
+ public NodeInfos getNodeInfos(Query query) {
+ this.readWriteLock.readLock().lock();
+ NodeInfos nodes = null;
+ try {
+ nodes = getLeaderInvoker().blockingCall(DiscoveryServiceGrpc.getGetNodesMethod(), query);
+ } catch (Exception e) {
+ log.error("Failed to invoke [ getNodeInfos ], query: {} ", query, e);
+ } finally {
+ this.readWriteLock.readLock().unlock();
+ }
+ return nodes;
+ }
+
+ /***
+ * 启动心跳任务
+ */
+ @Override
+ public void scheduleTask() {
+ timer.scheduleAtFixedRate(task, 0, period);
+ }
+
+ abstract NodeInfo getRegisterNode();
+
+ abstract Consumer getRegisterConsumer();
+
+ @Override
+ public void cancelTask() {
+ this.timer.cancel();
+ }
+
+ @Override
+ public void onLeaderChanged(String leader) {
+ }
+
+ @Override
+ public void close() {
+ this.timer.cancel();
+ readWriteLock.writeLock().lock();
+ try {
+ super.close();
+ } catch (Exception e) {
+ log.info("Close channel with error : {}.", e);
+ } finally {
+ readWriteLock.writeLock().unlock();
+ }
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
new file mode 100644
index 0000000000..89f7f69ea3
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterType;
+
+@Useless("discovery related")
+public class DiscoveryClientImpl extends DiscoveryClient {
+
+    private final String id;
+    private final RegisterType type;
+    private final String version;
+    private final String appName;
+    private final int times;
+    private final String address;
+    private final Map labels;
+    private final Consumer registerConsumer;
+
+    private DiscoveryClientImpl(Builder builder) {
+        super(builder.delay, builder.conf);
+        period = builder.delay;
+        id = builder.id;
+        type = builder.type;
+        version = builder.version;
+        appName = builder.appName;
+        times = builder.times;
+        address = builder.address;
+        labels = builder.labels;
+        registerConsumer = builder.registerConsumer;
+    }
+
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    @Override
+    NodeInfo getRegisterNode() {
+        return NodeInfo.newBuilder().setAddress(this.address)
+                       .setVersion(this.version)
+                       .setAppName(this.appName).setInterval(this.period)
+                       .setId(this.id).putAllLabels(labels).build();
+    }
+
+    @Override
+    Consumer getRegisterConsumer() {
+        return registerConsumer;
+    }
+
+ public static final class Builder {
+
+ private int delay;
+ private String centerAddress;
+ private String id;
+ private RegisterType type;
+ private String address;
+ private Map labels;
+ private String version;
+ private String appName;
+ private int times;
+ private Consumer registerConsumer;
+ private PDConfig conf;
+
+ private Builder() {
+ }
+
+ public Builder setDelay(int val) {
+ delay = val;
+ return this;
+ }
+
+ public Builder setCenterAddress(String val) {
+ centerAddress = val;
+ return this;
+ }
+
+ public Builder setId(String val) {
+ id = val;
+ return this;
+ }
+
+ public Builder setType(RegisterType val) {
+ type = val;
+ return this;
+ }
+
+ public Builder setAddress(String val) {
+ address = val;
+ return this;
+ }
+
+ public Builder setLabels(Map val) {
+ labels = val;
+ return this;
+ }
+
+ public Builder setVersion(String val) {
+ version = val;
+ return this;
+ }
+
+ public Builder setAppName(String val) {
+ appName = val;
+ return this;
+ }
+
+ public Builder setTimes(int val) {
+ times = val;
+ return this;
+ }
+
+ public Builder setPdConfig(PDConfig val) {
+ this.conf = val;
+ return this;
+ }
+
+ public Builder setRegisterConsumer(Consumer registerConsumer) {
+ this.registerConsumer = registerConsumer;
+ return this;
+ }
+
+ public DiscoveryClientImpl build() {
+ return new DiscoveryClientImpl(this);
+ }
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
new file mode 100644
index 0000000000..3a225b44e0
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
@@ -0,0 +1,314 @@
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.client.impl.StreamDelegator;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+import org.apache.hugegraph.pd.grpc.kv.K;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.Kv;
+import org.apache.hugegraph.pd.grpc.kv.KvResponse;
+import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc;
+import org.apache.hugegraph.pd.grpc.kv.LockRequest;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLRequest;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchRequest;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+
+import io.grpc.MethodDescriptor;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author zhangyingjie
+ * @date 2022/6/20
+ **/
+@Slf4j
+public class KvClient extends BaseClient implements Closeable {
+
+ private static String keyListenPrefix = "K-";
+ private static String prefixListenPrefix = "P-";
+ private static String delegatorPrefix = "KV-";
+ private AtomicLong clientId = new AtomicLong(0L);
+ private Semaphore semaphore = new Semaphore(1);
+ private AtomicBoolean closed = new AtomicBoolean(false);
+ private ConcurrentMap delegators = new ConcurrentHashMap<>();
+
+ public KvClient(PDConfig pdConfig) {
+ super(pdConfig, KvServiceGrpc::newStub, KvServiceGrpc::newBlockingStub);
+ }
+
+ public KvResponse put(String key, String value) throws PDException {
+ Kv kv = Kv.newBuilder().setKey(key).setValue(value).build();
+ KvResponse response = blockingUnaryCall(KvServiceGrpc.getPutMethod(), kv);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public KResponse get(String key) throws PDException {
+ K k = K.newBuilder().setKey(key).build();
+ KResponse response = blockingUnaryCall(KvServiceGrpc.getGetMethod(), k);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public KvResponse delete(String key) throws PDException {
+ K k = K.newBuilder().setKey(key).build();
+ KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeleteMethod(), k);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public KvResponse deletePrefix(String prefix) throws PDException {
+ K k = K.newBuilder().setKey(prefix).build();
+ KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeletePrefixMethod(), k);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public ScanPrefixResponse scanPrefix(String prefix) throws PDException {
+ K k = K.newBuilder().setKey(prefix).build();
+ ScanPrefixResponse response = blockingUnaryCall(KvServiceGrpc.getScanPrefixMethod(), k);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public TTLResponse keepTTLAlive(String key) throws PDException {
+ TTLRequest request = TTLRequest.newBuilder().setKey(key).build();
+ TTLResponse response = blockingUnaryCall(KvServiceGrpc.getKeepTTLAliveMethod(), request);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public TTLResponse putTTL(String key, String value, long ttl) throws PDException {
+ TTLRequest request = TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build();
+ TTLResponse response = blockingUnaryCall(KvServiceGrpc.getPutTTLMethod(), request);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ private void onEvent(WatchResponse value, Consumer consumer) {
+ // log.info("receive message for {},event Count:{}", value.getState(), value.getEventsCount());
+ this.clientId.compareAndSet(0L, value.getClientId());
+ if (value.getEventsCount() != 0) {
+ try {
+ consumer.accept((T) value);
+ } catch (Exception e) {
+ log.info(
+ "an error occurred while executing the client callback method, which should not " +
+ "have happened.Please check the callback method of the client",
+ e);
+ }
+ }
+ }
+
+ public void listen(String key, Consumer consumer) throws PDException {
+ acquire();
+ try {
+ StreamDelegator delegator = createDelegator(keyListenPrefix + key,
+ KvServiceGrpc.getWatchMethod());
+ delegator.listen(getWatchRequest(key), getStreamDataHandler(key, consumer));
+ } catch (Exception e) {
+ release();
+ throw new PDException(ErrorType.PD_UNAVAILABLE, e);
+ }
+ }
+
+ public void listenPrefix(String prefix, Consumer consumer) throws PDException {
+ acquire();
+ try {
+ StreamDelegator delegator = createDelegator(prefixListenPrefix + prefix,
+ KvServiceGrpc.getWatchPrefixMethod());
+ delegator.listen(getWatchRequest(prefix), getStreamDataHandler(prefix, consumer));
+ } catch (Exception e) {
+ release();
+ throw new PDException(ErrorType.PD_UNAVAILABLE, e);
+ }
+ }
+
+ private void acquire() {
+ if (this.clientId.get() == 0L) {
+ try {
+ this.semaphore.acquire();
+ if (this.clientId.get() != 0L) {
+ this.semaphore.release();
+ } else {
+ log.info("wait for client starting....");
+ }
+ } catch (Exception e) {
+ log.error("get semaphore with error:", e);
+ }
+ }
+ }
+
+ private void release() {
+ try {
+ if (this.semaphore.availablePermits() == 0) {
+ this.semaphore.release();
+ log.info("listen finished");
+ }
+ } catch (Exception e) {
+ log.warn("release failed:", e);
+ }
+ }
+
+ public List getWatchList(T response) {
+ List values = new LinkedList<>();
+ List eventsList = response.getEventsList();
+ for (WatchEvent event : eventsList) {
+ if (event.getType() != WatchType.Put) {
+ return null;
+ }
+ String value = event.getCurrent().getValue();
+ values.add(value);
+ }
+ return values;
+ }
+
+ public Map getWatchMap(T response) {
+ Map values = new HashMap<>();
+ List eventsList = response.getEventsList();
+ for (WatchEvent event : eventsList) {
+ if (event.getType() != WatchType.Put) {
+ return null;
+ }
+ WatchKv current = event.getCurrent();
+ String key = current.getKey();
+ String value = current.getValue();
+ values.put(key, value);
+ }
+ return values;
+ }
+
+ public LockResponse lock(String key, long ttl) throws PDException {
+ LockResponse response;
+ acquire();
+ try {
+ LockRequest k =
+ LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).setTtl(ttl).build();
+ response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k);
+ handleErrors(response.getHeader());
+ this.clientId.compareAndSet(0L, response.getClientId());
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ release();
+ }
+ return response;
+ }
+
+ public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException {
+ LockResponse response;
+ acquire();
+ try {
+ LockRequest k =
+ LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).setTtl(ttl).build();
+ response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k);
+ handleErrors(response.getHeader());
+ this.clientId.compareAndSet(0L, response.getClientId());
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ release();
+ }
+ return response;
+ }
+
+ public LockResponse isLocked(String key) throws PDException {
+ LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).build();
+ LockResponse response = blockingUnaryCall(KvServiceGrpc.getIsLockedMethod(), k);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public LockResponse unlock(String key) throws PDException {
+ assert this.clientId.get() != 0L;
+ LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).build();
+ LockResponse response = blockingUnaryCall(KvServiceGrpc.getUnlockMethod(), k);
+ handleErrors(response.getHeader());
+ this.clientId.compareAndSet(0L, response.getClientId());
+ assert this.clientId.get() == response.getClientId();
+ return response;
+ }
+
+ public LockResponse keepAlive(String key) throws PDException {
+ assert this.clientId.get() != 0L;
+ LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).build();
+ LockResponse response = blockingUnaryCall(KvServiceGrpc.getKeepAliveMethod(), k);
+ handleErrors(response.getHeader());
+ this.clientId.compareAndSet(0L, response.getClientId());
+ assert this.clientId.get() == response.getClientId();
+ return response;
+ }
+
+ public void close() {
+ this.delegators.entrySet().forEach(d -> d.getValue().close());
+ this.delegators.clear();
+ this.closed.set(true);
+ super.close();
+ }
+
+ private Consumer getStreamDataHandler(String key, Consumer consumer) {
+ return value -> {
+ boolean b;
+ switch (value.getState()) {
+ case Starting:
+ b = this.clientId.compareAndSet(0L, value.getClientId());
+ if (b) {
+ log.info("set watch client id to :{}", Long.valueOf(value.getClientId()));
+ }
+ release();
+ break;
+ case Started:
+ onEvent(value, consumer);
+ break;
+ case Leader_Changed:
+ this.clientId.set(0L);
+ release();
+ onLeaderChanged("");
+ break;
+ }
+ };
+ }
+
+ private void onDelegatorError(Throwable t) {
+ release();
+ if (!this.closed.get()) {
+ this.clientId.set(0L);
+ }
+ }
+
+ private WatchRequest getWatchRequest(String key) {
+ return WatchRequest.newBuilder().setClientId(this.clientId.get()).setKey(key).build();
+ }
+
+ private StreamDelegator createDelegator(String name,
+ MethodDescriptor methodDesc) {
+ StreamDelegator delegator =
+ new StreamDelegator(delegatorPrefix + name, getLeaderInvoker(), methodDesc);
+ this.delegators.put(delegator.getName(), delegator);
+ return delegator;
+ }
+
+ public void onLeaderChanged(String leader) {
+ if (this.closed.get()) {
+ return;
+ }
+ this.delegators.entrySet().parallelStream().forEach(e -> e.getValue().reconnect());
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
new file mode 100644
index 0000000000..710e985604
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+import org.apache.hugegraph.pd.grpc.common.ResponseHeader;
+
+import com.google.protobuf.ByteString;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Useless("license related")
+@Slf4j
+public class LicenseClient extends BaseClient {
+
+    public LicenseClient(PDConfig config) {
+        super(config, PDGrpc::newStub, PDGrpc::newBlockingStub);
+    }
+
+    public Pdpb.PutLicenseResponse putLicense(byte[] content) {
+        Pdpb.PutLicenseRequest request = Pdpb.PutLicenseRequest.newBuilder()
+                                                               .setContent(
+                                                                       ByteString.copyFrom(content))
+                                                               .build();
+        try {
+            KVPair pair = concurrentBlockingUnaryCall(
+                    PDGrpc.getPutLicenseMethod(), request,
+                    (rs) -> rs.getHeader().getError().getType().equals(ErrorType.OK));
+            if (pair.getKey()) {
+                Pdpb.PutLicenseResponse.Builder builder = Pdpb.PutLicenseResponse.newBuilder();
+                builder.setHeader(OK_HEADER);
+                return builder.build();
+            } else {
+                return pair.getValue();
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            log.debug("put license with error:{} ", e);
+            ResponseHeader rh = createErrorHeader(ErrorType.LICENSE_ERROR_VALUE, e.getMessage());
+ return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build();
+ }
+ }
+
+ public void onLeaderChanged(String leader) {
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java
new file mode 100644
index 0000000000..2144260ee2
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java
@@ -0,0 +1,139 @@
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.GraphSpaces;
+import org.apache.hugegraph.pd.grpc.Graphs;
+import org.apache.hugegraph.pd.grpc.MetaServiceGrpc;
+import org.apache.hugegraph.pd.grpc.Metapb.Graph;
+import org.apache.hugegraph.pd.grpc.Metapb.GraphSpace;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.Metapb.Store;
+import org.apache.hugegraph.pd.grpc.Partitions;
+import org.apache.hugegraph.pd.grpc.ShardGroups;
+import org.apache.hugegraph.pd.grpc.Stores;
+import org.apache.hugegraph.pd.grpc.common.NoArg;
+import org.apache.hugegraph.pd.grpc.common.VoidResponse;
+
+import java.io.Closeable;
+
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetGraphSpacesMethod;
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetGraphsMethod;
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetPartitionsMethod;
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetShardGroupsMethod;
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetStoresMethod;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/9/19
+ **/
+public class MetaClient extends BaseClient implements Closeable {
+
+
+ public MetaClient(PDConfig config) {
+ super(config, MetaServiceGrpc::newStub, MetaServiceGrpc::newBlockingStub);
+ }
+
+ /* @Override
+ protected AbstractStub createStub() {
+ return MetaServiceGrpc.newStub(channel);
+ }
+
+ @Override
+ protected AbstractBlockingStub createBlockingStub() {
+ return MetaServiceGrpc.newBlockingStub(channel);
+ }*/
+
+ /**
+ *
+ */
+ public Stores getStores() throws PDException {
+ Stores res = blockingUnaryCall(getGetStoresMethod(), NoArg.newBuilder().build());
+ handleErrors(res.getHeader());
+ return res;
+ }
+
+ /**
+ *
+ */
+ public Partitions getPartitions() throws PDException {
+ Partitions res = blockingUnaryCall(getGetPartitionsMethod(), NoArg.newBuilder().build());
+ handleErrors(res.getHeader());
+ return res;
+ }
+
+ /**
+ *
+ */
+ public ShardGroups getShardGroups() throws PDException {
+ ShardGroups res = blockingUnaryCall(getGetShardGroupsMethod(), NoArg.newBuilder().build());
+ handleErrors(res.getHeader());
+ return res;
+ }
+
+
+ /**
+ *
+ */
+ public GraphSpaces getGraphSpaces() throws PDException {
+ GraphSpaces res = blockingUnaryCall(getGetGraphSpacesMethod(), NoArg.newBuilder().build());
+ handleErrors(res.getHeader());
+ return res;
+ }
+
+ /**
+ *
+ */
+ public Graphs getGraphs() throws PDException {
+ Graphs res = blockingUnaryCall(getGetGraphsMethod(), NoArg.newBuilder().build());
+ handleErrors(res.getHeader());
+ return res;
+ }
+
+ /**
+ *
+ */
+ public void updateStore(Store request) throws PDException {
+ VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateStoreMethod(), request);
+ handleErrors(res.getHeader());
+ }
+
+ /**
+ *
+ */
+ public void updatePartition(Partition request) throws PDException {
+ VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdatePartitionMethod(), request);
+ handleErrors(res.getHeader());
+ }
+
+ /**
+ *
+ */
+ public void updateShardGroup(ShardGroup request) throws PDException {
+ VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateShardGroupMethod(), request);
+ handleErrors(res.getHeader());
+ }
+
+ /**
+ *
+ */
+ public void updateGraphSpace(GraphSpace request) throws PDException {
+ VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateGraphSpaceMethod(), request);
+ handleErrors(res.getHeader());
+ }
+
+ /**
+ *
+ */
+ public void updateGraph(Graph request) throws PDException {
+ VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateGraphMethod(), request);
+ handleErrors(res.getHeader());
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ }
+
+ public void onLeaderChanged(String leader) {}
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
new file mode 100644
index 0000000000..82927f9102
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
@@ -0,0 +1,685 @@
+package org.apache.hugegraph.pd.client;
+
+import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID;
+
+import java.util.List;
+
+import org.apache.hugegraph.pd.client.impl.PDApi;
+import org.apache.hugegraph.pd.client.listener.PDEventListener;
+import org.apache.hugegraph.pd.client.rpc.ConnectionManager;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.ClusterOp;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+import org.apache.hugegraph.pd.grpc.Metapb.Shard;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.GraphStatsResponse;
+import org.apache.hugegraph.pd.pulse.Pulse;
+import org.apache.hugegraph.pd.watch.PDEventRaiser;
+import org.apache.hugegraph.pd.watch.Watcher;
+
+import io.grpc.ManagedChannel;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * PD客户端实现类
+ *
+ * @author yanjinbing
+ */
+@Slf4j
+public class PDClient extends BaseClient {
+
+ private final PDConfig config;
+ @Getter
+ private final ClientCache cache;
+ private final PulseClient pulse;
+ private final PDEventRaiser events;
+ @Getter
+ private final Watcher pdWatch;
+ private final PDApi pdApi;
+ private final ConnectionManager cm;
+
+ PDClient(PDConfig config) {
+ super(config, PDGrpc::newStub, PDGrpc::newBlockingStub);
+ this.config = config;
+ this.cm = getCm();
+ this.pulse = this.cm.getPulseClient();
+ this.pdWatch = this.cm.getWatcher();
+ this.cache = this.cm.getCache();
+ this.pdApi = new PDApi(this, this.cache);
+ this.cache.setPdApi(this.pdApi);
+ this.events = new PDEventRaiser(this.pdWatch);
+ }
+
+ /**
+ * 创建PDClient对象,并初始化stub
+ *
+ * @param config
+ * @return
+ */
+ public static PDClient create(PDConfig config) {
+ PDClient client = new PDClient(config);
+ return client;
+ }
+
+
+ @Deprecated
+ public static void setChannel(ManagedChannel mc) {
+ log.warn("[PDClient] Invoking a deprecated method [ PDClient::setChannel ].");
+ }
+
+ /**
+ * Return the local PD config.
+ *
+ * @return
+ */
+ public PDConfig getClientConfig() {
+ return this.config;
+ }
+
+
+ /**
+ * Return the PD pulse client.
+ *
+ * @return
+ */
+ public Pulse getPulse() {
+ return this.pulse;
+ }
+
+ public Pulse getPulse(long storeId) {
+ this.pulse.setObserverId(storeId);
+ return this.pulse;
+ }
+
+ /**
+ * Force a reconnection to the PD leader, regardless of whether the current connection is alive or not.
+ */
+ public void forceReconnect() {
+ getCm().reconnect();
+ }
+
+ /**
+ * Begin watching with the leader address.
+ *
+ * @param leader
+ */
+ @Deprecated
+ public void startWatch(String leader) {
+ log.warn("[PDClient] Invoking a deprecated method [ PDClient::startWatch ],");
+ }
+
+ public String getLeaderIp() {
+ return getCm().getLeader();
+ }
+
+ /**
+ * Store注册,返回storeID,初次注册会返回新ID
+ *
+ * @param store
+ * @return
+ */
+ public long registerStore(Metapb.Store store) throws PDException {
+ return this.pdApi.registerStore(store);
+ }
+
+ /**
+ * 根据storeId返回Store对象
+ *
+ * @param storeId
+ * @return
+ * @throws PDException
+ */
+ public Metapb.Store getStore(long storeId) throws PDException {
+ return this.pdApi.getStore(storeId);
+ }
+
+ /**
+ * 更新Store信息,包括上下线等
+ *
+ * @param store
+ * @return
+ */
+ public Metapb.Store updateStore(Metapb.Store store) throws PDException {
+ return this.pdApi.updateStore(store);
+ }
+
+ /**
+ * 返回活跃的Store
+ *
+ * @param graphName
+ * @return
+ */
+ public List getActiveStores(String graphName) throws PDException {
+ return this.pdApi.getActiveStores(graphName);
+ }
+
+ public List getActiveStores() throws PDException {
+ return this.pdApi.getActiveStores();
+ }
+
+ /**
+ * 返回活跃的Store
+ *
+ * @param graphName
+ * @return
+ */
+ public List getAllStores(String graphName) throws PDException {
+ return this.pdApi.getAllStores(graphName);
+ }
+
+ /**
+ * Store心跳,定期调用,保持在线状态
+ *
+ * @param stats
+ * @throws PDException
+ */
+ public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException {
+ return this.pdApi.storeHeartbeat(stats);
+ }
+
+ /**
+ * 查询Key所属分区信息
+ *
+ * @param graphName
+ * @param key
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartition(String graphName, byte[] key) throws PDException {
+ return this.pdApi.getPartition(graphName, key);
+ }
+
+ public KVPair getPartition(String graphName, byte[] key, int code) throws PDException {
+ return this.pdApi.getPartition(graphName, key, code);
+ }
+
+ /**
+ * 根据hashcode查询所属分区信息
+ *
+ * @param graphName
+ * @param hashCode
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartitionByCode(String graphName, long hashCode)
+ throws PDException {
+ return this.pdApi.getPartitionByCode(graphName, hashCode);
+ }
+
+ /**
+ * 获取Key的哈希值
+ */
+ public int keyToCode(String graphName, byte[] key) {
+ return PartitionUtils.calcHashcode(key);
+ }
+
+ /**
+ * 根据分区id返回分区信息, RPC请求
+ *
+ * @param graphName
+ * @param partId
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartitionById(String graphName, int partId) throws PDException {
+ return this.pdApi.getPartitionById(graphName, partId);
+ }
+
+ public ShardGroup getShardGroup(int partId) throws PDException {
+ return this.pdApi.getShardGroup(partId);
+ }
+
+ public ShardGroup getShardGroupDirect(int partId) throws PDException {
+ return this.pdApi.getShardGroupDirect(partId);
+ }
+
+ public void updateShardGroup(ShardGroup shardGroup) throws PDException {
+ this.pdApi.updateShardGroup(shardGroup);
+ }
+
+ /**
+ * 返回startKey和endKey跨越的所有分区信息
+ *
+ * @param graphName
+ * @param startKey
+ * @param endKey
+ * @return
+ * @throws PDException
+ */
+ public List> scanPartitions(String graphName, byte[] startKey,
+ byte[] endKey) throws PDException {
+ return this.pdApi.scanPartitions(graphName, startKey, endKey);
+ }
+
+ /**
+ * 根据条件查询分区信息
+ *
+ * @return
+ * @throws PDException
+ */
+ public List getPartitionsByStore(long storeId) throws PDException {
+
+ return this.pdApi.getPartitionsByStore(storeId);
+ }
+
+ /**
+ * 查找指定store上的指定partitionId
+ *
+ * @return
+ * @throws PDException
+ */
+ public List queryPartitions(long storeId, int partitionId) throws PDException {
+ return this.pdApi.queryPartitions(storeId, partitionId);
+ }
+
+ public List getPartitions(long storeId, String graphName) throws PDException {
+
+ return this.pdApi.getPartitions(storeId, graphName);
+
+ }
+
+ /**
+ * create a graph, requires the graph wouldn't exist before
+ *
+ * @param graph graph
+ * @return graph that created
+ * @throws PDException error occurs
+ */
+ public Metapb.Graph createGraph(Metapb.Graph graph) throws PDException {
+ return this.pdApi.createGraph(graph);
+ }
+
+ /**
+ * update graph, update graph name if exists, otherwise create a new graph
+ *
+ * @param graph the new graph
+ * @return graph that updated
+ * @throws PDException error occurs
+ */
+ public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException {
+ return this.pdApi.setGraph(graph);
+ }
+
+ public Metapb.Graph getGraph(String graphName) throws PDException {
+ return this.pdApi.getGraph(graphName);
+ }
+
+ public Metapb.Graph getGraphWithOutException(String graphName) throws
+ PDException {
+ return this.pdApi.getGraphWithOutException(graphName);
+ }
+
+ public Metapb.Graph delGraph(String graphName) throws PDException {
+ return this.pdApi.delGraph(graphName);
+ }
+
+ public List updatePartition(List partitions) throws PDException {
+ return this.pdApi.updatePartition(partitions);
+
+ }
+
+ public Partition delPartition(String graphName, int partitionId) throws PDException {
+ return this.pdApi.delPartition(graphName, partitionId);
+ }
+
+ /**
+ * Invalidates the cached partition {@code partitionId} of {@code graphName}.
+ */
+ public void invalidPartitionCache(String graphName, int partitionId) {
+ this.pdApi.invalidPartitionCache(graphName, partitionId);
+ }
+
+ /**
+ * Invalidates every cached partition (all graphs).
+ */
+ public void invalidPartitionCache() {
+ // Drop all partitions from the local client cache.
+ cache.removePartitions();
+ }
+
+ /**
+ * Invalidates the cached store entry for {@code storeId}.
+ * (The original comment said "partition cache", but this removes a store entry.)
+ */
+ public void invalidStoreCache(long storeId) {
+ cache.removeStore(storeId);
+ }
+
+ /**
+ * Called by the HugeGraph server when a partition leader changed; updates the cache.
+ */
+ public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) {
+ this.pdApi.updatePartitionLeader(graphName, partId, leaderStoreId);
+ }
+
+ /**
+ * Called by HugeGraph store to refresh the cached partition and its leader shard.
+ *
+ * @param partition partition to cache
+ */
+ public void updatePartitionCache(Partition partition, Shard leader) {
+ this.pdApi.updatePartitionCache(partition, leader);
+ }
+
+ /** Fetches an id for {@code key}, advancing the generator by {@code delta}. */
+ public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException {
+ return this.pdApi.getIdByKey(key, delta);
+ }
+
+ /** Resets the id generator associated with {@code key}. */
+ public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException {
+ return this.pdApi.resetIdByKey(key);
+ }
+
+ /** Returns the current PD leader member. */
+ public Metapb.Member getLeader() throws PDException {
+ return this.pdApi.getLeader();
+ }
+
+ /** Returns all PD cluster members. */
+ public Pdpb.GetMembersResponse getMembers() throws PDException {
+ return this.pdApi.getMembers();
+ }
+
+ /** Cluster stats for the default store group. */
+ public Metapb.ClusterStats getClusterStats() throws PDException {
+ return this.pdApi.getClusterStats(DEFAULT_STORE_GROUP_ID);
+ }
+
+ // NOTE(review): this long/int overload pair is error-prone — an int literal
+ // argument binds to the (int storeGroupId) overload below, and this overload's
+ // parameter is named storeId while sibling calls pass group ids. Confirm intent.
+ public Metapb.ClusterStats getClusterStats(long storeId) throws PDException {
+ return this.pdApi.getClusterStats(storeId);
+ }
+
+ /** Cluster stats for a specific store group. */
+ public Metapb.ClusterStats getClusterStats(int storeGroupId) throws PDException {
+ return this.pdApi.getClusterStats(storeGroupId);
+ }
+
+ /** Registers a listener for PD events (partition/store/graph/shard-group changes). */
+ public void addEventListener(PDEventListener listener) {
+ this.events.addListener(listener);
+ }
+
+ /** Exposes the underlying watch client. */
+ public Watcher getWatchClient() {
+ return this.pdWatch;
+ }
+
+ /**
+ * Returns store status information.
+ *
+ * @param offlineExcluded whether to exclude offline stores
+ */
+ public List getStoreStatus(boolean offlineExcluded) throws PDException {
+ return this.pdApi.getStoreStatus(offlineExcluded);
+ }
+
+ /** Sets (or updates) the storage limit of a graph space. */
+ public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException {
+ this.pdApi.setGraphSpace(graphSpaceName, storageLimit);
+ }
+
+ public List getGraphSpace(String graphSpaceName) throws
+ PDException {
+ return this.pdApi.getGraphSpace(graphSpaceName);
+ }
+
+ /** @deprecated prefer the overloads that do not take a partitionCount. */
+ @Deprecated
+ public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws
+ PDException {
+ this.pdApi.setPDConfig(partitionCount, peerList, shardCount, version);
+ }
+
+ // Forwards with partitionCount = 0.
+ public void setPDConfig(String peerList, int shardCount, long version) throws PDException {
+ this.pdApi.setPDConfig(0, peerList, shardCount, version);
+ }
+
+ public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException {
+ this.pdApi.setPDConfig(pdConfig);
+ }
+
+ public Metapb.PDConfig getPDConfig() throws PDException {
+ return this.pdApi.getPDConfig();
+ }
+
+ /** Fetches the PD config at a specific version. */
+ public Metapb.PDConfig getPDConfig(long version) throws PDException {
+ return this.pdApi.getPDConfig(version);
+ }
+
+ /** Changes the PD raft peer list. */
+ public void changePeerList(String peerList) throws PDException {
+ this.pdApi.changePeerList(peerList);
+ }
+
+ /**
+ * Operation mode
+ * Auto: splits automatically until each store reaches its maximum partition
+ * count; requires a store group id (group id 0 targets the default partitions).
+ * Prefer splitData(ClusterOp.OperationMode mode, int storeGroupId, List params)
+ * with mode = Auto, an explicit storeGroupId and empty params.
+ *
+ * @throws PDException
+ */
+ @Deprecated
+ public void splitData() throws PDException {
+ this.pdApi.splitData(ClusterOp.OperationMode.Auto, 0, List.of());
+ }
+
+ /**
+ * Operation mode
+ * Auto: splits automatically until each store reaches its maximum partition
+ * count; requires a store group id.
+ * Expert: expert mode, requires splitParams; every SplitDataParam must belong
+ * to the same store group.
+ *
+ * @param mode
+ * @param params
+ * @throws PDException
+ */
+ public void splitData(ClusterOp.OperationMode mode, int storeGroupId,
+ List params)
+ throws PDException {
+ this.pdApi.splitData(mode, storeGroupId, params);
+ }
+
+ /**
+ * Splits a single graph; this expands its partitions and makes the overall
+ * partition count inconsistent.
+ * Recommendation: split the whole store group instead; large and small graphs
+ * can be placed in different partitions via grouping.
+ *
+ * @param graphName
+ * @param toCount
+ * @throws PDException
+ */
+ @Deprecated
+ public void splitGraphData(String graphName, int toCount) throws PDException {
+ this.pdApi.splitGraphData(graphName, toCount);
+ }
+
+ /**
+ * Auto-rebalances until every store holds the same number of partitions.
+ * Prefer balancePartition(int storeGroupId) with an explicit storeGroupId.
+ *
+ * @throws PDException
+ */
+ @Deprecated
+ public void balancePartition() throws PDException {
+ this.pdApi.balancePartition(ClusterOp.OperationMode.Auto, DEFAULT_STORE_GROUP_ID, List.of());
+ }
+
+ /** Auto-rebalances the given store group. */
+ public void balancePartition(int storeGroupId) throws PDException {
+ this.pdApi.balancePartition(ClusterOp.OperationMode.Auto, storeGroupId, List.of());
+ }
+
+ /**
+ * Moves partitions, manual mode.
+ * // Operation mode
+ * // Auto: moves automatically until every store holds the same number of partitions
+ * // Expert: expert mode, requires transferParams
+ *
+ * @param params transferParams for Expert mode; source and target store must be in the same store group
+ * @throws PDException
+ */
+ public void movePartition(ClusterOp.OperationMode mode, List params) throws
+ PDException {
+ // NOTE(review): the mode argument is ignored — Expert is always passed
+ // (together with DEFAULT_STORE_GROUP_ID); confirm this is intended.
+ this.pdApi.balancePartition(ClusterOp.OperationMode.Expert, DEFAULT_STORE_GROUP_ID, params);
+ }
+
+ /** Reports an async task's state back to PD. */
+ public void reportTask(MetaTask.Task task) throws PDException {
+ this.pdApi.reportTask(task);
+ }
+
+ /** Fetches the statistics of one partition of a graph. */
+ public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException {
+ return this.pdApi.getPartitionsStats(graph, partId);
+ }
+
+ /**
+ * Balances the number of leaders across stores.
+ */
+ public void balanceLeaders() throws PDException {
+ this.pdApi.balanceLeaders();
+ }
+
+ /**
+ * Removes a store from PD.
+ */
+ public Metapb.Store delStore(long storeId) throws PDException {
+ return this.pdApi.delStore(storeId);
+ }
+
+ /**
+ * Runs a compaction over the whole RocksDB instance.
+ *
+ * @throws PDException
+ */
+ public void dbCompaction() throws PDException {
+ this.pdApi.dbCompaction();
+ }
+
+ /**
+ * Runs a compaction on the given RocksDB table.
+ *
+ * @param tableName
+ * @throws PDException
+ */
+ public void dbCompaction(String tableName) throws PDException {
+ this.pdApi.dbCompaction(tableName);
+ }
+
+ /**
+ * Merges partitions, shrinking the current partitions down to toCount.
+ *
+ * @param toCount target number of partitions after shrinking
+ * @throws PDException
+ */
+ @Deprecated
+ public void combineCluster(int toCount) throws PDException {
+ this.pdApi.combineCluster(DEFAULT_STORE_GROUP_ID, toCount);
+ }
+
+ /** Shrinks the given shard group down to toCount partitions. */
+ public void combineCluster(int shardGroupId, int toCount) throws PDException {
+ this.pdApi.combineCluster(shardGroupId, toCount);
+ }
+
+ /**
+ * Shrinks a single graph to toCount partitions; as with splitting, the
+ * partition count must stay uniform within a store group.
+ * For special needs consider migrating the graph to another group.
+ *
+ * @param graphName graph name
+ * @param toCount target count
+ * @throws PDException
+ */
+ @Deprecated
+ public void combineGraph(String graphName, int toCount) throws PDException {
+ this.pdApi.combineGraph(graphName, toCount);
+ }
+
+ /** Deletes the shard group with the given id. */
+ public void deleteShardGroup(int groupId) throws PDException {
+ this.pdApi.deleteShardGroup(groupId);
+ }
+
+ /**
+ * Used to rebuild a store's shard list.
+ *
+ * @param groupId shard group id
+ * @param shards shard list, delete when shards size is 0
+ */
+ public void updateShardGroupOp(int groupId, List shards) throws PDException {
+ this.pdApi.updateShardGroupOp(groupId, shards);
+ }
+
+ /**
+ * invoke fireChangeShard command
+ *
+ * @param groupId shard group id
+ * @param shards shard list
+ */
+ public void changeShard(int groupId, List shards) throws PDException {
+ this.pdApi.changeShard(groupId, shards);
+ }
+
+ /** Returns PD's client-side cache snapshot. */
+ public CacheResponse getClientCache() throws PDException {
+ return this.pdApi.getClientCache();
+ }
+
+ /** Returns the cached partitions of one graph. */
+ public CachePartitionResponse getPartitionCache(String graph) throws PDException {
+ return this.pdApi.getPartitionCache(graph);
+ }
+
+ /** Pushes a new raft configuration string to PD. */
+ public void updatePdRaft(String raftConfig) throws PDException {
+ this.pdApi.updatePdRaft(raftConfig);
+ }
+
+ /** Submits an index-build task; returns the task id. */
+ public long submitBuildIndexTask(Metapb.BuildIndexParam param) throws PDException {
+ return this.pdApi.submitBuildIndexTask(param);
+ }
+
+ /** Submits a graph-backup task (sourceGraph -> targetGraph); returns the task id. */
+ public long submitBackupGraphTask(String sourceGraph, String targetGraph) throws PDException {
+ return this.pdApi.submitBackupGraphTask(sourceGraph, targetGraph);
+ }
+
+ /** @deprecated use {@link #queryTaskStatus(long)} instead. */
+ @Deprecated
+ public Pdpb.TaskQueryResponse queryBuildIndexTaskStatus(long taskId) throws PDException {
+ return this.queryTaskStatus(taskId);
+ }
+
+ // Note: still delegates to pdApi.queryBuildIndexTaskStatus under the hood.
+ public Pdpb.TaskQueryResponse queryTaskStatus(long taskId) throws PDException {
+ return this.pdApi.queryBuildIndexTaskStatus(taskId);
+ }
+
+ /** @deprecated use {@link #retryTask(long)} instead. */
+ @Deprecated
+ public Pdpb.TaskQueryResponse retryBuildIndexTask(long taskId) throws PDException {
+ return retryTask(taskId);
+ }
+
+ /** Retries a task by id. */
+ public Pdpb.TaskQueryResponse retryTask(long taskId) throws PDException {
+ return this.pdApi.retryTask(taskId);
+ }
+
+ /** Fetches aggregated statistics of one graph. */
+ public GraphStatsResponse getGraphStats(String graphName) throws PDException {
+ return this.pdApi.getGraphStats(graphName);
+ }
+
+ /** Creates a store group with the given id, name and partition count. */
+ public Metapb.StoreGroup createStoreGroup(int groupId, String name, int partitionCount) throws
+ PDException {
+ return this.pdApi.createStoreGroup(groupId, name, partitionCount);
+ }
+
+ public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException {
+ return this.pdApi.getStoreGroup(groupId);
+ }
+
+ public List getAllStoreGroups() throws PDException {
+ return this.pdApi.getAllStoreGroups();
+ }
+
+ /** Renames a store group. */
+ public Metapb.StoreGroup updateStoreGroup(int groupId, String name) throws PDException {
+ return this.pdApi.updateStoreGroup(groupId, name);
+ }
+
+ public List getStoresByStoreGroup(int groupId) throws PDException {
+ return this.pdApi.getStoresByStoreGroup(groupId);
+ }
+
+ /** Reassigns a store to a group; returns whether the relation was updated. */
+ public boolean updateStoreGroupRelation(long storeId, int groupId) throws PDException {
+ return this.pdApi.updateStoreGroupRelation(storeId, groupId);
+ }
+
+ // No-op: this client takes no action on a PD leader change — TODO confirm intended.
+ public void onLeaderChanged(String leader) {
+ }
+
+ // Delegates shutdown to the base client.
+ public void close() {
+ super.close();
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
new file mode 100644
index 0000000000..dfc4766ce2
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
@@ -0,0 +1,111 @@
+package org.apache.hugegraph.pd.client;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Base64;
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.hugegraph.pd.client.interceptor.AuthenticationException;
+
+import lombok.Getter;
+import lombok.Setter;
+
+/**
+ * Client-side configuration for connecting to PD: server list, gRPC timeout,
+ * cache/notification switches, message-size limits and basic-auth credentials.
+ * Instances are created through the static of(...) factories.
+ */
+public final class PDConfig {
+ //TODO multi-server
+ private String serverHost = "localhost:9000";
+ private long grpcTimeOut = 60000; // gRPC call timeout in milliseconds (the old "10 seconds" comment contradicted this 60 s value)
+ private boolean enablePDNotify = false; // whether to receive asynchronous notifications from PD
+ private boolean enableCache = false;
+ private String authority; // Base64 of "userName:pwd", see setAuthority()
+ private String userName = "";
+ private static final int GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024;
+ private static final int GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024;
+ private static int inboundMessageSize = GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE;
+ private static int outboundMessageSize = GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE;
+ @Getter
+ @Setter
+ private boolean autoGetPdServers = false;
+
+ // Private: use the of(...) factories.
+ private PDConfig() {
+ }
+
+ public static PDConfig of() {
+ return new PDConfig();
+ }
+
+ public static PDConfig of(String serverHost) {
+ PDConfig config = new PDConfig();
+ config.serverHost = serverHost;
+ return config;
+ }
+
+ /** Factory with an explicit gRPC timeout (milliseconds). */
+ public static PDConfig of(String serverHost, long timeOut) {
+ PDConfig config = new PDConfig();
+ config.serverHost = serverHost;
+ config.grpcTimeOut = timeOut;
+ return config;
+ }
+
+ public String getServerHost() {
+ return serverHost;
+ }
+
+ public long getGrpcTimeOut() {
+ return grpcTimeOut;
+ }
+
+ @Deprecated
+ public PDConfig setEnablePDNotify(boolean enablePDNotify) {
+ this.enablePDNotify = enablePDNotify;
+ // TODO: temporary code, remove once hugegraph has been adapted
+ this.enableCache = enablePDNotify;
+ return this;
+ }
+
+ public boolean isEnableCache() {
+ return enableCache;
+ }
+
+ public PDConfig setEnableCache(boolean enableCache) {
+ this.enableCache = enableCache;
+ return this;
+ }
+
+ @Override
+ public String toString() {
+ return "PDConfig{ serverHost='" + serverHost + '\'' + '}';
+ }
+
+ /**
+ * Stores the user name and a Base64-encoded "userName:pwd" token used for
+ * basic authentication. Note: Base64 is encoding, not encryption.
+ */
+ public PDConfig setAuthority(String userName, String pwd) {
+ this.userName = userName;
+ String auth = userName + ':' + pwd;
+ this.authority = new String(Base64.getEncoder().encode(auth.getBytes(UTF_8)));
+ return this;
+ }
+
+ public String getUserName() {
+ return userName;
+ }
+
+ /**
+ * @return the Base64 basic-auth token
+ * @throws AuthenticationException if setAuthority(...) was never called
+ */
+ public String getAuthority() {
+ if (StringUtils.isEmpty(this.authority)){
+ throw new AuthenticationException("invalid basic authentication info");
+ }
+ return authority;
+ }
+ public static int getInboundMessageSize() {
+ return inboundMessageSize;
+ }
+
+ public static void setInboundMessageSize(int inboundMessageSize) {
+ PDConfig.inboundMessageSize = inboundMessageSize;
+ }
+
+ public static int getOutboundMessageSize() {
+ return outboundMessageSize;
+ }
+
+ public static void setOutboundMessageSize(int outboundMessageSize) {
+ PDConfig.outboundMessageSize = outboundMessageSize;
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java
new file mode 100644
index 0000000000..e112375964
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java
@@ -0,0 +1,134 @@
+package org.apache.hugegraph.pd.client;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.hugegraph.pd.client.impl.StreamDelegator;
+import org.apache.hugegraph.pd.client.impl.StreamDelegatorSender;
+import org.apache.hugegraph.pd.client.support.PDExecutors;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+import org.apache.hugegraph.pd.pulse.DefaultPulseNotifier;
+import org.apache.hugegraph.pd.pulse.Pulse;
+import org.apache.hugegraph.pd.pulse.PulseListener;
+import org.apache.hugegraph.pd.pulse.PulseNotifier;
+import org.apache.hugegraph.pd.pulse.PulseResponseNotice;
+import org.apache.hugegraph.pd.pulse.PulseServerNotice;
+import com.google.protobuf.GeneratedMessageV3;
+
+import lombok.Getter;
+import lombok.Setter;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author lynn.bond@hotmail.com on 2023/11/20
+ * @version 3.0.1 removed the `noticeParserMap` on 2024/01/08
+ */
+@Slf4j
+public class PulseClient extends BaseClient implements Pulse {
+
+ // Lock token guarding notifier creation in connect().
+ private final byte[] lock = new byte[0];
+ // NOTE(review): the generic type parameters of the following maps were lost
+ // in this patch text (e.g. Map<PulseType, ...>) — confirm against the repo.
+ private final Map> listeners = new ConcurrentHashMap<>();
+ private final Map> notifiers = new ConcurrentHashMap<>();
+ private final Map> delegators =
+ new ConcurrentHashMap<>();
+ // Single-threaded pool so acks are sent one at a time, in submission order.
+ private final ExecutorService threadPool = PDExecutors.newQueuingPool("pulse-ack", 1);
+
+ @Getter
+ @Setter
+ private long observerId;
+
+ public PulseClient(PDConfig config) {
+ super(config, HgPdPulseGrpc::newStub, HgPdPulseGrpc::newBlockingStub);
+ }
+
+ /** Connects using the default pulse type (partition heartbeat). */
+ public PulseNotifier connect(PulseListener listener) {
+ return connect(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, listener);
+ }
+
+ /**
+ * Registers the listener for the given pulse type and lazily creates and
+ * starts the notifier for that type.
+ */
+ public PulseNotifier connect(PulseType pulseType,
+ PulseListener listener) {
+ HgAssert.isArgumentNotNull(listener, "listener");
+ this.listeners.put(pulseType, listener);
+ DefaultPulseNotifier notifier = this.notifiers.get(pulseType);
+ if (notifier == null) {
+ synchronized (this.lock) {
+ notifier = this.notifiers.computeIfAbsent(pulseType,
+ k -> new DefaultPulseNotifier(pulseType, newStreaming(pulseType), this.observerId)
+ );
+ // NOTE(review): if two threads race past the null check, start()
+ // is invoked again on the already-created notifier — confirm
+ // start() is idempotent.
+ notifier.start();
+ }
+ }
+ return notifier;
+ }
+
+ // Currently a stub that always reports success.
+ public boolean resetStub(String host, PulseNotifier notifier) {
+ return true;
+ }
+
+ // Creates (or reuses) the stream delegator for the pulse type and links our
+ // response handler to it.
+ private StreamDelegatorSender newStreaming(PulseType pulseType) {
+ StreamDelegator delegator = delegators.computeIfAbsent(pulseType,
+ k -> new StreamDelegator(pulseType.name(),
+ getLeaderInvoker(),
+ HgPdPulseGrpc.getPulseMethod()));
+ return delegator.link(response -> handleOnNext(pulseType, (PulseResponse) response));
+ }
+
+ public PulseListener getListener(PulseType pulseType) {
+ return this.listeners.get(pulseType);
+ }
+
+ // Wraps a PulseResponse into a notice whose ack callback sends the ack for
+ // this notice id / observer id pair.
+ private PulseServerNotice toPulseResponseNotice(PulseResponse pulseResponse) {
+ return new PulseResponseNotice(pulseResponse.getNoticeId(),
+ e -> ackNotice(
+ pulseResponse.getPulseType(),
+ pulseResponse.getNoticeId(),
+ pulseResponse.getObserverId()),
+ pulseResponse);
+ }
+
+ // Dispatches one server response to the registered listener; listener errors
+ // are logged, never propagated back into the gRPC stream.
+ private void handleOnNext(PulseType pulseType, PulseResponse response) {
+ PulseServerNotice notice = toPulseResponseNotice(response);
+ PulseListener listener = getListener(pulseType);
+ if (listener != null) {
+ try {
+ listener.onNext(response);
+ listener.onNotice(notice);
+ } catch (Throwable e) {
+ log.error("Listener failed to handle notice: \n{}, caused by: ", response, e);
+ }
+ }
+ }
+
+ // Fails fast if no notifier exists for the pulse type being acked.
+ private void ackNotice(PulseType pulseType, long noticeId, long observerId) {
+ DefaultPulseNotifier> sender = this.notifiers.get(pulseType);
+ if (sender == null) {
+ log.error("Sender is null, pulse type: {}", pulseType);
+ throw new IllegalStateException("Sender is null, pulse type: " + pulseType);
+ }
+ sendAck(sender, noticeId, observerId);
+ }
+
+ // Acks asynchronously on the single-threaded pool to preserve ordering.
+ private void sendAck(DefaultPulseNotifier> sender, long noticeId, long observerId) {
+ this.threadPool.execute(() -> {
+ log.info("Sending ack, notice id: {}, observer id: {}, ts: {}", noticeId, observerId,
+ System.currentTimeMillis());
+ sender.ack(noticeId, observerId);
+ });
+ }
+
+ /** Reconnects every stream delegator after a PD leader change. */
+ public void onLeaderChanged(String leader) {
+ this.delegators.entrySet().parallelStream().forEach(e -> {
+ try {
+ e.getValue().reconnect();
+ } catch (Exception ex) {
+ log.warn("reconnect to leader with error:", ex);
+ }
+ });
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
new file mode 100644
index 0000000000..ae1570725e
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
@@ -0,0 +1,2134 @@
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_PD_LEADER_CHANGE;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionByCodeRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.ManagedChannel;
+import io.grpc.MethodDescriptor;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * PD client implementation class
+ */
+// NOTE(review): the rest of this file hunk still contains unresolved merge
+// conflict markers (<<<<<<<< HEAD / ======== / >>>>>>>>); the conflict must be
+// resolved before this patch can compile.
+@Slf4j
+public class PDClient {
+
+ private final PDConfig config;
+ private final Pdpb.RequestHeader header;
+ private final ClientCache cache;
+ private final StubProxy stubProxy;
+ private final List eventListeners;
+ private PDWatch.Watcher partitionWatcher;
+ private PDWatch.Watcher storeWatcher;
+ private PDWatch.Watcher graphWatcher;
+ private PDWatch.Watcher shardGroupWatcher;
+ private PDWatch pdWatch;
+
+ // serverHost is a comma-separated pd.peers list; the stub proxy rotates over it.
+ private PDClient(PDConfig config) {
+ this.config = config;
+ this.header = Pdpb.RequestHeader.getDefaultInstance();
+ this.stubProxy = new StubProxy(config.getServerHost().split(","));
+ this.eventListeners = new CopyOnWriteArrayList<>();
+ this.cache = new ClientCache(this);
+ }
+
+ /**
+ * Create a PD client object and initialize the stub
+ *
+ * @param config
+ * @return
+ */
+ public static PDClient create(PDConfig config) {
+ return new PDClient(config);
+ }
+
+ /**
+ * Lazily creates the blocking stub by probing pd.peers for the current
+ * leader, then wires up the PDWatch connection. Synchronized so only one
+ * thread (re)builds the stub at a time.
+ */
+ private synchronized void newBlockingStub() throws PDException {
+ if (stubProxy.get() != null) {
+ return;
+ }
+
+ String host = newLeaderStub();
+ if (host.isEmpty()) {
+ throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+ "PD unreachable, pd.peers=" + config.getServerHost());
+ }
+
+ log.info("PDClient enable cache, init PDWatch object");
+ connectPdWatch(host);
+ }
+
+ /**
+ * (Re)connects the PDWatch client to the given leader and registers the
+ * partition / node / graph / shard-group watchers. No-op when already
+ * connected to that leader over a healthy channel.
+ */
+ public void connectPdWatch(String leader) {
+
+ if (pdWatch != null && Objects.equals(pdWatch.getCurrentHost(), leader) &&
+ pdWatch.checkChannel()) {
+ return;
+ }
+
+ log.info("PDWatch client connect host:{}", leader);
+ pdWatch = new PDWatchImpl(leader);
+
+ partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() {
+ @Override
+ public void onNext(PartitionEvent response) {
+ // log.info("PDClient receive partition event {}-{} {}",
+ // response.getGraph(), response.getPartitionId(), response.getChangeType());
+ invalidPartitionCache(response.getGraph(), response.getPartitionId());
+
+ if (response.getChangeType() == PartitionEvent.ChangeType.DEL) {
+ cache.removeAll(response.getGraph());
+ }
+
+ eventListeners.forEach(listener -> {
+ listener.onPartitionChanged(response);
+ });
+ }
+
+ @Override
+ public void onError(Throwable throwable) {
+ // NOTE(review): only the message is logged; pass the throwable as the
+ // last SLF4J argument to preserve the stack trace.
+ log.error("watchPartition exception {}", throwable.getMessage());
+ closeStub(false);
+ }
+ });
+
+ storeWatcher = pdWatch.watchNode(new PDWatch.Listener<>() {
+ @Override
+ public void onNext(NodeEvent response) {
+ log.info("PDClient receive store event {} {}",
+ response.getEventType(), Long.toHexString(response.getNodeId()));
+
+ if (response.getEventType() == NODE_PD_LEADER_CHANGE) {
+ // pd raft change: the event's graph field carries the new leader address
+ var leaderIp = response.getGraph();
+ log.info("watchNode: pd leader changed to {}, current watch:{}",
+ leaderIp, pdWatch.getCurrentHost());
+ closeStub(!Objects.equals(pdWatch.getCurrentHost(), leaderIp));
+ connectPdWatch(leaderIp);
+ }
+
+ invalidStoreCache(response.getNodeId());
+ eventListeners.forEach(listener -> {
+ listener.onStoreChanged(response);
+ });
+ }
+
+ @Override
+ public void onError(Throwable throwable) {
+ log.error("watchNode exception {}", throwable.getMessage());
+ closeStub(false);
+ }
+
+ });
+
+ graphWatcher = pdWatch.watchGraph(new PDWatch.Listener<>() {
+ @Override
+ public void onNext(WatchResponse response) {
+ eventListeners.forEach(listener -> {
+ listener.onGraphChanged(response);
+ });
+ }
+
+ @Override
+ public void onError(Throwable throwable) {
+ log.warn("graphWatcher exception {}", throwable.getMessage());
+ }
+ });
+
+ shardGroupWatcher = pdWatch.watchShardGroup(new PDWatch.Listener<>() {
+ @Override
+ public void onNext(WatchResponse response) {
+ var shardResponse = response.getShardGroupResponse();
+ // log.info("PDClient receive shard group event: raft {}-{}", shardResponse
+ // .getShardGroupId(),
+ // shardResponse.getType());
+ if (config.isEnableCache()) {
+ switch (shardResponse.getType()) {
+ case WATCH_CHANGE_TYPE_DEL:
+ cache.deleteShardGroup(shardResponse.getShardGroupId());
+ break;
+ case WATCH_CHANGE_TYPE_ALTER:
+ cache.updateShardGroup(
+ response.getShardGroupResponse().getShardGroup());
+ break;
+ default:
+ break;
+ }
+ }
+ eventListeners.forEach(listener -> listener.onShardGroupChanged(response));
+ }
+
+ @Override
+ public void onError(Throwable throwable) {
+ log.warn("shardGroupWatcher exception {}", throwable.getMessage());
+ }
+ });
+
+ }
+
+ /**
+ * Drops the cached stub and resets the client cache; when closeWatcher is
+ * true, also tears down all watchers and the PDWatch connection (used when
+ * the leader moved to a different host).
+ */
+ private synchronized void closeStub(boolean closeWatcher) {
+ stubProxy.set(null);
+ cache.reset();
+
+ if (closeWatcher) {
+ if (partitionWatcher != null) {
+ partitionWatcher.close();
+ partitionWatcher = null;
+ }
+ if (storeWatcher != null) {
+ storeWatcher.close();
+ storeWatcher = null;
+ }
+ if (graphWatcher != null) {
+ graphWatcher.close();
+ graphWatcher = null;
+ }
+
+ if (shardGroupWatcher != null) {
+ shardGroupWatcher.close();
+ shardGroupWatcher = null;
+ }
+
+ pdWatch = null;
+ }
+ }
+
+ // Returns the shared blocking stub (building it on first use) with a fresh
+ // per-call deadline.
+ private PDGrpc.PDBlockingStub getStub() throws PDException {
+ if (stubProxy.get() == null) {
+ newBlockingStub();
+ }
+ return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS);
+ }
+
+ // Builds a brand-new blocking stub on the current channel with a fresh deadline.
+ private PDGrpc.PDBlockingStub newStub() throws PDException {
+ if (stubProxy.get() == null) {
+ newBlockingStub();
+ }
+ return PDGrpc.newBlockingStub(stubProxy.get().getChannel())
+ .withDeadlineAfter(config.getGrpcTimeOut(),
+ TimeUnit.MILLISECONDS);
+ }
+
+ /**
+ * Probes each configured host in turn, asks it who the leader is, points the
+ * stub proxy at the leader and returns the leader host; returns an empty
+ * string when no host is reachable.
+ */
+ private String newLeaderStub() {
+ String leaderHost = "";
+ for (int i = 0; i < stubProxy.getHostCount(); i++) {
+ String host = stubProxy.nextHost();
+ ManagedChannel channel = Channels.getChannel(host);
+
+ PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel)
+ .withDeadlineAfter(config.getGrpcTimeOut(),
+ TimeUnit.MILLISECONDS);
+ try {
+ var leaderIp = getLeaderIp(stub);
+ if (!leaderIp.equalsIgnoreCase(host)) {
+ leaderHost = leaderIp;
+ // NOTE(review): the new stub is still built on the probed host's
+ // channel even though the leader is a different host — confirm
+ // this is intended.
+ stubProxy.set(PDGrpc.newBlockingStub(channel)
+ .withDeadlineAfter(config.getGrpcTimeOut(),
+ TimeUnit.MILLISECONDS));
+ } else {
+ stubProxy.set(stub);
+ leaderHost = host;
+ }
+ stubProxy.setLeader(leaderIp);
+
+ log.info("PDClient connect to host = {} success", leaderHost);
+ break;
+ } catch (Exception e) {
+ // Best-effort probing: log and try the next configured host.
+ log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(),
+ e.getCause() != null ? e.getCause().getMessage() : "");
+ }
+ }
+ return leaderHost;
+ }
+
+ /** Returns the current PD leader's gRPC URL via the cached stub. */
+ public String getLeaderIp() {
+
+ return getLeaderIp(stubProxy.get());
+ }
+
+ /**
+ * Asks the given stub for the leader's gRPC URL. With a null stub, first
+ * builds the stub and answers from the proxy's cached leader (PDException is
+ * wrapped into a RuntimeException on that path).
+ */
+ private String getLeaderIp(PDGrpc.PDBlockingStub stub) {
+ if (stub == null) {
+ try {
+ getStub();
+ return stubProxy.getLeader();
+ } catch (PDException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+ .setHeader(header)
+ .build();
+ Metapb.Member leader = stub.getMembers(request).getLeader();
+ return leader.getGrpcUrl();
+ }
+
+ /**
+ * Store registration, the store ID will be returned, and the initial registration will
+ * return a new ID
+========
+package org.apache.hugegraph.pd.client.impl;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hugegraph.pd.client.ClientCache;
+import org.apache.hugegraph.pd.client.PDClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.client.rpc.ConnectionManager;
+import org.apache.hugegraph.pd.client.rpc.Invoker;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.ClusterOp;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.StoreGroup;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+import org.apache.hugegraph.pd.grpc.common.RequestHeader;
+import org.apache.hugegraph.pd.grpc.common.ResponseHeader;
+import com.google.protobuf.ByteString;
+
+import io.grpc.MethodDescriptor;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author lynn.bond@hotmail.com on 2023/12/8
+ */
+@Slf4j
+public class PDApi {
+ private final PDConfig config;
+ private final ConnectionManager cm;
+ private final ClientCache cache;
+ private final RequestHeader header = RequestHeader.getDefaultInstance();
+ private final Invoker invoker;
+ private PDClient client;
+
+ // All collaborators are borrowed from the owning PDClient.
+ public PDApi(PDClient client, ClientCache cache) {
+ this.client = client;
+ this.config = client.getConfig();
+ this.cm = client.getCm();
+ this.cache = cache;
+ this.invoker = client.getLeaderInvoker();
+ }
+
+ // Routes a unary gRPC call through the leader invoker.
+ // NOTE(review): the generic parameters (presumably <ReqT, RespT>) were lost
+ // in this patch text — confirm against the repository.
+ private RespT blockingUnaryCall(
+ MethodDescriptor method, ReqT req) throws PDException {
+ return invoker.blockingCall(method, req);
+ }
+
+ // Translates a PD error header into a PDException; no-op when the type is OK.
+ private void handleResponseError(ResponseHeader header) throws PDException {
+ var errorType = header.getError().getType();
+ if (header.hasError() && errorType != ErrorType.OK) {
+ throw new PDException(header.getError().getTypeValue(),
+ String.format("PD request error, error code = %d, msg = %s",
+ header.getError().getTypeValue(),
+ header.getError().getMessage()));
+ }
+ }
+
+ /**
+ * Store注册,返回storeID,初次注册会返回新ID
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ *
+ * @param store
+ * @return
+ */
+ public long registerStore(Metapb.Store store) throws PDException {
+ Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder()
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ .setHeader(header)
+ .setStore(store).build();
+
+========
+ .setHeader(header)
+ .setStore(store).build();
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ Pdpb.RegisterStoreResponse response =
+ blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoreId();
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ /**
+ * Returns the Store object based on the store ID
+ *
+ * @param storeId
+ * @return
+ * @throws PDException
+ */
+ public Metapb.Store getStore(long storeId) throws PDException {
+ // Serve from the client cache when possible; fall back to a PD lookup.
+ Metapb.Store store = cache.getStoreById(storeId);
+ if (store == null) {
+ Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder()
+ .setHeader(header)
+ .setStoreId(storeId).build();
+ Pdpb.GetStoreResponse response = getStub().getStore(request);
+ handleResponseError(response.getHeader());
+ store = response.getStore();
+ if (config.isEnableCache()) {
+ cache.addStore(storeId, store);
+ }
+ }
+ return store;
+ }
+
+ /**
+ * Update the store information, including online and offline
+ *
+ * @param store
+ * @return the store as persisted by PD
+ */
+ public Metapb.Store updateStore(Metapb.Store store) throws PDException {
+ Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder()
+ .setHeader(header)
+ .setStore(store).build();
+
+ Pdpb.SetStoreResponse response = getStub().setStore(request);
+ handleResponseError(response.getHeader());
+ store = response.getStore();
+ // Refresh the cache with the store as PD returned it.
+ if (config.isEnableCache()) {
+ cache.addStore(store.getId(), store);
+ }
+ return store;
+ }
+
+ /**
+ * Returns the stores serving the graph, discovered by walking the graph's
+ * partitions by key code from 0 upward, following each partition's endKey.
+ * (KVPair's generic parameters were lost in this patch text — TODO confirm.)
+ *
+ * @param graphName
+ * @return
+ */
+ public List getActiveStores(String graphName) throws PDException {
+ List stores = new ArrayList<>();
+ KVPair ptShard = this.getPartitionByCode(graphName, 0);
+ while (ptShard != null) {
+ stores.add(this.getStore(ptShard.getValue().getStoreId()));
+ if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) {
+ ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey());
+ } else {
+ // Last partition reached: endKey covers the end of the key space.
+ ptShard = null;
+ }
+ }
+ return stores;
+ }
+
+ /** Fetches all stores across graphs, excluding offline ones. */
+ public List getActiveStores() throws PDException {
+ Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName("")
+ .setExcludeOfflineStores(true)
+ .build();
+ Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+ handleResponseError(response.getHeader());
+ return response.getStoresList();
+
+ }
+
+ /**
+ * Fetches all stores serving the graph, including offline ones
+ * (excludeOfflineStores is false here, despite the original "active store"
+ * wording).
+ *
+ * @param graphName
+ * @return
+ */
+ public List getAllStores(String graphName) throws PDException {
+ Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setExcludeOfflineStores(false)
+ .build();
+ Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+ handleResponseError(response.getHeader());
+ return response.getStoresList();
+
+ }
+
+ /**
+ * Store heartbeat, call regularly, stay online
+ *
+ * @param stats
+ * @throws PDException
+ */
+ public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException {
+ Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder()
+ .setHeader(header)
+ .setStats(stats).build();
+ Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request);
+ handleResponseError(response.getHeader());
+ return response.getClusterStats();
+ }
+
+ private KVPair getKvPair(String graphName, byte[] key,
+ KVPair partShard) throws
+ PDException {
+ if (partShard == null) {
+ GetPartitionRequest request = GetPartitionRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setKey(ByteString.copyFrom(key))
+ .build();
+ GetPartitionResponse response =
+========
+ public KVPair getKvPair(String graphName, byte[] key, KVPair partShard) throws PDException {
+ if (partShard == null) {
+ Pdpb.GetPartitionRequest request = Pdpb.GetPartitionRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setKey(ByteString.copyFrom(key))
+ .build();
+ Pdpb.GetPartitionResponse response =
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request);
+ handleResponseError(response.getHeader());
+ partShard = new KVPair<>(response.getPartition(), response.getLeader());
+ cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+ }
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ return partShard;
+ }
+
+ /**
+ * Query the partition to which the key belongs
+ *
+ * @param graphName
+ * @param key
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartition(String graphName, byte[] key) throws
+ PDException {
+
+ KVPair partShard =
+ this.getPartitionByCode(graphName, PartitionUtils.calcHashcode(key));
+ partShard = getKvPair(graphName, key, partShard);
+ return partShard;
+ }
+
+ public KVPair getPartition(String graphName, byte[] key,
+ int code) throws
+ PDException {
+ KVPair partShard =
+ cache.getPartitionByCode(graphName, code);
+ partShard = getKvPair(graphName, key, partShard);
+========
+
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ return partShard;
+ }
+
+ /**
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ * Query the partition information based on the hashcode
+========
+ * 根据hashcode查询所属分区信息
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ *
+ * @param graphName
+ * @param hashCode
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartitionByCode(String graphName,
+ long hashCode)
+ throws PDException {
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ KVPair partShard =
+ cache.getPartitionByCode(graphName, hashCode);
+ if (partShard == null) {
+ GetPartitionByCodeRequest request = GetPartitionByCodeRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setCode(hashCode).build();
+ GetPartitionResponse response =
+========
+ // 先查cache,cache没有命中,在调用PD
+ KVPair partShard = cache.getPartitionByCode(graphName, hashCode);
+ if (partShard == null) {
+ Pdpb.GetPartitionByCodeRequest request = Pdpb.GetPartitionByCodeRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setCode(hashCode).build();
+ Pdpb.GetPartitionResponse response =
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request);
+ handleResponseError(response.getHeader());
+ partShard = new KVPair<>(response.getPartition(), response.getLeader());
+ cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+ cache.updateShardGroup(getShardGroup(partShard.getKey().getId()));
+ }
+
+ if (partShard.getValue() == null) {
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ ShardGroup shardGroup = getShardGroup(partShard.getKey().getId());
+========
+ Metapb.ShardGroup shardGroup = getShardGroup(partShard.getKey().getId());
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ if (shardGroup != null) {
+ for (var shard : shardGroup.getShardsList()) {
+ if (shard.getRole() == Metapb.ShardRole.Leader) {
+ partShard.setValue(shard);
+ }
+ }
+ } else {
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ log.error("getPartitionByCode: get shard group failed, {}",
+ partShard.getKey().getId());
+========
+ log.error("getPartitionByCode: get shard group failed, {}", partShard.getKey().getId());
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ }
+ }
+ return partShard;
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ /**
+ * Obtain the hash value of the key
+ */
+ public int keyToCode(String graphName, byte[] key) {
+ return PartitionUtils.calcHashcode(key);
+ }
+
+ /**
+ * Returns partition information based on the partition ID and RPC request
+========
+ /**
+ * 根据分区id返回分区信息, RPC请求
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ *
+ * @param graphName
+ * @param partId
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartitionById(String graphName,
+ int partId) throws PDException {
+ KVPair partShard =
+ cache.getPartitionById(graphName, partId);
+ if (partShard == null) {
+ Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder()
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ .setHeader(header)
+ .setGraphName(
+ graphName)
+ .setPartitionId(
+ partId)
+ .build();
+ GetPartitionResponse response =
+========
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setPartitionId(partId)
+ .build();
+ Pdpb.GetPartitionResponse response =
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request);
+ handleResponseError(response.getHeader());
+ partShard = new KVPair<>(response.getPartition(), response.getLeader());
+ if (config.isEnableCache()) {
+ cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+ cache.updateShardGroup(getShardGroup(partShard.getKey().getId()));
+ }
+ }
+ if (partShard.getValue() == null) {
+ var shardGroup = getShardGroup(partShard.getKey().getId());
+ if (shardGroup != null) {
+ for (var shard : shardGroup.getShardsList()) {
+ if (shard.getRole() == Metapb.ShardRole.Leader) {
+ partShard.setValue(shard);
+ }
+ }
+ } else {
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ log.error("getPartitionById: get shard group failed, {}",
+ partShard.getKey().getId());
+========
+ log.error("getPartitionById: get shard group failed, {}", partShard.getKey().getId());
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ }
+ }
+ return partShard;
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ public ShardGroup getShardGroup(int partId) throws PDException {
+ ShardGroup group = cache.getShardGroup(partId);
+ if (group == null) {
+ Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder()
+ .setHeader(header)
+ .setGroupId(partId)
+ .build();
+ Pdpb.GetShardGroupResponse response =
+ blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ group = response.getShardGroup();
+ if (config.isEnableCache()) {
+ cache.updateShardGroup(group);
+ }
+ }
+ return group;
+========
+
+ public Metapb.ShardGroup getShardGroupDirect(int partId) throws PDException {
+ Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder()
+ .setHeader(header)
+ .setGroupId(partId)
+ .build();
+ Pdpb.GetShardGroupResponse response = blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getShardGroup();
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ }
+
+ public void updateShardGroup(ShardGroup shardGroup) throws PDException {
+ Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder()
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ .setHeader(header)
+ .setShardGroup(
+ shardGroup)
+ .build();
+========
+ .setHeader(header)
+ .setShardGroup(shardGroup)
+ .build();
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ Pdpb.UpdateShardGroupResponse response =
+ blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request);
+ handleResponseError(response.getHeader());
+
+ if (config.isEnableCache()) {
+ cache.updateShardGroup(shardGroup);
+ }
+ }
+
+ /**
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ * Returns information about all partitions spanned by the start and end keys
+ *
+ * @param graphName
+ * @param startKey
+ * @param endKey
+ * @return
+ * @throws PDException
+ */
+ public List> scanPartitions(String graphName,
+ byte[] startKey,
+ byte[] endKey) throws
+ PDException {
+ List> partitions = new ArrayList<>();
+ KVPair startPartShard = getPartition(graphName, startKey);
+ KVPair endPartShard = getPartition(graphName, endKey);
+ if (startPartShard == null || endPartShard == null) {
+ return null;
+ }
+
+ partitions.add(startPartShard);
+ while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey()
+ && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) {
+ startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey());
+ partitions.add(startPartShard);
+ }
+ return partitions;
+ }
+
+ /**
+ * Query partition information based on conditions
+========
+ * 根据条件查询分区信息
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ *
+ * @return
+ * @throws PDException
+ */
+ public List getPartitionsByStore(long storeId) throws PDException {
+
+ Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+ .setStoreId(storeId)
+ .build();
+ Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+ .setQuery(query).build();
+ Pdpb.QueryPartitionsResponse response =
+ blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+
+ handleResponseError(response.getHeader());
+ return response.getPartitionsList();
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ public List queryPartitions(long storeId, int partitionId) throws
+ PDException {
+========
+ /**
+ * 查找指定store上的指定partitionId
+ *
+ * @return
+ * @throws PDException
+ */
+ public List queryPartitions(long storeId, int partitionId) throws PDException {
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+
+ Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+ .setStoreId(storeId)
+ .setPartitionId(partitionId)
+ .build();
+ Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+ .setQuery(query).build();
+ Pdpb.QueryPartitionsResponse response =
+ blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+
+ handleResponseError(response.getHeader());
+ return response.getPartitionsList();
+ }
+
+ public List getPartitions(long storeId, String graphName) throws PDException {
+
+ Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+ .setStoreId(storeId)
+ .setGraphName(graphName).build();
+ Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+ .setQuery(query).build();
+ Pdpb.QueryPartitionsResponse response =
+ blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+
+ handleResponseError(response.getHeader());
+ return response.getPartitionsList();
+
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException {
+ Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder()
+ .setGraph(graph)
+ .build();
+ Pdpb.SetGraphResponse response =
+========
+ public Metapb.Graph createGraph(Metapb.Graph graph) throws PDException {
+ Pdpb.CreateGraphRequest request = Pdpb.CreateGraphRequest.newBuilder()
+ .setGraph(graph)
+ .build();
+ Pdpb.CreateGraphResponse response = blockingUnaryCall(PDGrpc.getCreateGraphMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getGraph();
+ }
+
+ public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException {
+ Pdpb.CreateGraphRequest request = Pdpb.CreateGraphRequest.newBuilder()
+ .setGraph(graph)
+ .build();
+ Pdpb.CreateGraphResponse response =
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ blockingUnaryCall(PDGrpc.getSetGraphMethod(), request);
+
+ handleResponseError(response.getHeader());
+ return response.getGraph();
+ }
+
+ public Metapb.Graph getGraph(String graphName) throws PDException {
+ GetGraphRequest request = GetGraphRequest.newBuilder()
+ .setGraphName(graphName)
+ .build();
+ Pdpb.GetGraphResponse response =
+ blockingUnaryCall(PDGrpc.getGetGraphMethod(), request);
+
+ handleResponseError(response.getHeader());
+ return response.getGraph();
+ }
+
+ public Metapb.Graph getGraphWithOutException(String graphName) throws
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ PDException {
+ GetGraphRequest request = GetGraphRequest.newBuilder()
+ .setGraphName(
+ graphName)
+ .build();
+========
+ PDException {
+ Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder()
+ .setGraphName(
+ graphName)
+ .build();
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ Pdpb.GetGraphResponse response = blockingUnaryCall(
+ PDGrpc.getGetGraphMethod(), request);
+ return response.getGraph();
+ }
+
+ public Metapb.Graph delGraph(String graphName) throws PDException {
+ Pdpb.DelGraphRequest request = Pdpb.DelGraphRequest.newBuilder()
+ .setGraphName(graphName)
+ .build();
+ Pdpb.DelGraphResponse response =
+ blockingUnaryCall(PDGrpc.getDelGraphMethod(), request);
+
+ handleResponseError(response.getHeader());
+ return response.getGraph();
+ }
+
+ public List updatePartition(List partitions) throws
+ PDException {
+
+ Pdpb.UpdatePartitionRequest request = Pdpb.UpdatePartitionRequest.newBuilder()
+ .addAllPartition(
+ partitions)
+ .build();
+ Pdpb.UpdatePartitionResponse response =
+ blockingUnaryCall(PDGrpc.getUpdatePartitionMethod(), request);
+ handleResponseError(response.getHeader());
+ invalidPartitionCache();
+
+ return response.getPartitionList();
+ }
+
+ public Metapb.Partition delPartition(String graphName, int partitionId) throws PDException {
+
+ Pdpb.DelPartitionRequest request = Pdpb.DelPartitionRequest.newBuilder()
+ .setGraphName(graphName)
+ .setPartitionId(partitionId)
+ .build();
+ Pdpb.DelPartitionResponse response =
+ blockingUnaryCall(PDGrpc.getDelPartitionMethod(), request);
+
+ handleResponseError(response.getHeader());
+ invalidPartitionCache(graphName, partitionId);
+ return response.getPartition();
+ }
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+
+ /**
+ * Delete the partitioned cache
+ */
+ public void invalidPartitionCache(String graphName, int partitionId) {
+ if (null != cache.getPartitionById(graphName, partitionId)) {
+ cache.removePartition(graphName, partitionId);
+ }
+ }
+
+ /**
+ * Delete the partitioned cache
+ */
+ public void invalidPartitionCache() {
+ cache.removePartitions();
+ }
+
+ /**
+ * Delete the partitioned cache
+ */
+ public void invalidStoreCache(long storeId) {
+ cache.removeStore(storeId);
+ }
+
+ /**
+ * Update the cache
+ */
+ public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) {
+ KVPair partShard = null;
+ try {
+ partShard = this.getPartitionById(graphName, partId);
+
+ if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) {
+ var shardGroup = this.getShardGroup(partId);
+ Metapb.Shard shard = null;
+ List shards = new ArrayList<>();
+
+ for (Metapb.Shard s : shardGroup.getShardsList()) {
+ if (s.getStoreId() == leaderStoreId) {
+ shard = s;
+ shards.add(Metapb.Shard.newBuilder(s)
+ .setStoreId(s.getStoreId())
+ .setRole(Metapb.ShardRole.Leader).build());
+ } else {
+ shards.add(Metapb.Shard.newBuilder(s)
+ .setStoreId(s.getStoreId())
+ .setRole(Metapb.ShardRole.Follower).build());
+ }
+ }
+
+ if (config.isEnableCache()) {
+ if (shard == null) {
+ cache.removePartition(graphName, partId);
+ }
+ }
+ }
+ } catch (PDException e) {
+ log.error("getPartitionException: {}", e.getMessage());
+ }
+ }
+
+ /**
+ * Update the cache
+ *
+ * @param partition
+ */
+ public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) {
+ if (config.isEnableCache()) {
+ cache.update(partition.getGraphName(), partition.getId(), partition);
+ cache.updateLeader(partition.getId(), leader);
+ }
+ }
+========
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+
+ public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException {
+ Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder()
+ .setHeader(header)
+ .setKey(key)
+ .setDelta(delta)
+ .build();
+ Pdpb.GetIdResponse response = blockingUnaryCall(PDGrpc.getGetIdMethod(), request);
+ handleResponseError(response.getHeader());
+ return response;
+ }
+
+ public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException {
+ Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder()
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ .setHeader(header)
+ .setKey(key)
+ .build();
+========
+ .setHeader(header)
+ .setKey(key)
+ .build();
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request);
+ handleResponseError(response.getHeader());
+ return response;
+ }
+
+ public Metapb.Member getLeader() throws PDException {
+ Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+ .setHeader(header)
+ .build();
+ Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getLeader();
+ }
+
+ public Pdpb.GetMembersResponse getMembers() throws PDException {
+ Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+ .setHeader(header)
+ .build();
+ Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request);
+ handleResponseError(response.getHeader());
+ return response;
+ }
+
+ public Metapb.ClusterStats getClusterStats(long storeId) throws PDException {
+ Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder()
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ .setHeader(header)
+ .build();
+ Pdpb.GetClusterStatsResponse response =
+ blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request);
+========
+ .setHeader(header)
+ .setStoreId(storeId)
+ .build();
+ Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request);
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ handleResponseError(response.getHeader());
+ return response.getCluster();
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ private > RespT
+ blockingUnaryCall(MethodDescriptor method, ReqT req) throws PDException {
+ return blockingUnaryCall(method, req, 1);
+ }
+
+ private > RespT
+ blockingUnaryCall(MethodDescriptor method, ReqT req, int retry) throws
+ PDException {
+ io.grpc.stub.AbstractBlockingStub stub = (AbstractBlockingStub) getStub();
+ try {
+ RespT resp = io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method,
+ stub.getCallOptions(), req);
+ return resp;
+ } catch (Exception e) {
+ log.error(method.getFullMethodName() + " exception, {}", e.getMessage());
+ if (e instanceof StatusRuntimeException) {
+ if (retry < stubProxy.getHostCount()) {
+ closeStub(true);
+ return blockingUnaryCall(method, req, ++retry);
+ }
+========
+ public Metapb.ClusterStats getClusterStats(int storeGroupId) throws PDException {
+ Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder()
+ .setHeader(header)
+ .setStoreGroup(storeGroupId)
+ .build();
+ Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getCluster();
+ }
+
+ public void changePeerList(String peerList) throws PDException {
+ ClusterOp.ChangePeerListRequest request = ClusterOp.ChangePeerListRequest.newBuilder()
+ .setPeerList(peerList)
+ .setHeader(header).build();
+ ClusterOp.ChangePeerListResponse response =
+ blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public void reportTask(MetaTask.Task task) throws PDException {
+ ClusterOp.ReportTaskRequest request = ClusterOp.ReportTaskRequest.newBuilder()
+ .setHeader(header)
+ .setTask(task).build();
+ ClusterOp.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public void deleteShardGroup(int groupId) throws PDException {
+ Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest
+ .newBuilder()
+ .setHeader(header)
+ .setGroupId(groupId)
+ .build();
+ Pdpb.DeleteShardGroupResponse response =
+ blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request);
+
+ handleResponseError(response.getHeader());
+ }
+
+ public Metapb.ShardGroup getShardGroup(int partId) throws PDException {
+ Metapb.ShardGroup group = cache.getShardGroup(partId);
+ if (group == null) {
+ group = getShardGroupDirect(partId);
+ if (config.isEnableCache()) {
+ cache.updateShardGroup(group);
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ }
+ }
+ return group;
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ private void handleResponseError(Pdpb.ResponseHeader header) throws
+ PDException {
+ var errorType = header.getError().getType();
+ if (header.hasError() && errorType != Pdpb.ErrorType.OK) {
+
+ throw new PDException(header.getError().getTypeValue(),
+ String.format(
+ "PD request error, error code = %d, msg = %s",
+ header.getError().getTypeValue(),
+ header.getError().getMessage()));
+========
+ public void invalidPartitionCache() {
+ // 检查是否存在缓存
+ cache.removePartitions();
+ }
+
+ /**
+ * 删除分区缓存
+ */
+ public void invalidPartitionCache(String graphName, int partitionId) {
+ // 检查是否存在缓存
+ if (null != cache.getPartitionById(graphName, partitionId)) {
+ cache.removePartition(graphName, partitionId);
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ }
+
+ }
+
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ public void addEventListener(PDEventListener listener) {
+ eventListeners.add(listener);
+ }
+
+ public PDWatch getWatchClient() {
+ return new PDWatchImpl(stubProxy.getHost());
+ }
+
+ /**
+ * Returns the store status information
+ */
+ public List getStoreStatus(boolean offlineExcluded) throws PDException {
+ Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+ .setHeader(header)
+ .setExcludeOfflineStores(
+ offlineExcluded)
+ .build();
+ Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request);
+========
+ /**
+ * 根据storeId返回Store对象
+ *
+ * @param storeId
+ * @return
+ * @throws PDException
+ */
+ public Metapb.Store getStore(long storeId) throws PDException {
+ Metapb.Store store = cache.getStoreById(storeId);
+ if (store == null) {
+ Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder()
+ .setHeader(header)
+ .setStoreId(storeId).build();
+ // Pdpb.GetStoreResponse response = getStub().getStore(request);
+ Pdpb.GetStoreResponse response = blockingUnaryCall(PDGrpc.getGetStoreMethod(), request);
+ handleResponseError(response.getHeader());
+ store = response.getStore();
+ if (config.isEnableCache()) {
+ cache.addStore(storeId, store);
+ }
+ }
+ return store;
+ }
+
+ /**
+ * 更新Store信息,包括上下线等
+ *
+ * @param store
+ * @return
+ */
+ public Metapb.Store updateStore(Metapb.Store store) throws PDException {
+ Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder()
+ .setHeader(header)
+ .setStore(store).build();
+
+ // Pdpb.SetStoreResponse response = getStub().setStore(request);
+ Pdpb.SetStoreResponse response = blockingUnaryCall(PDGrpc.getSetStoreMethod(), request);
+ handleResponseError(response.getHeader());
+ store = response.getStore();
+ if (config.isEnableCache()) {
+ cache.addStore(store.getId(), store);
+ }
+ return store;
+ }
+
+ public List getActiveStores() throws PDException {
+ Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName("")
+ .setExcludeOfflineStores(true)
+ .build();
+ // Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+ Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetAllStoresMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoresList();
+
+ }
+
+ /**
+ * 返回活跃的Store
+ *
+ * @param graphName
+ * @return
+ */
+ public List getActiveStores(String graphName) throws PDException {
+ Set stores = new HashSet<>();
+ KVPair ptShard = this.getPartitionByCode(graphName, 0);
+ while (ptShard != null) {
+ stores.add(this.getStore(ptShard.getValue().getStoreId()));
+ if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) {
+ ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey());
+ } else {
+ ptShard = null;
+ }
+ }
+ return new ArrayList<>(stores);
+ }
+
+ /**
+ * 返回活跃的Store
+ *
+ * @param graphName
+ * @return
+ */
+ public List getAllStores(String graphName) throws PDException {
+ Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setExcludeOfflineStores(false)
+ .build();
+ // Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+ Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetAllStoresMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoresList();
+
+ }
+
+ /**
+ * Store心跳,定期调用,保持在线状态
+ *
+ * @param stats
+ * @throws PDException
+ */
+ public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException {
+ Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder()
+ .setHeader(header)
+ .setStats(stats).build();
+ // Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request);
+ Pdpb.StoreHeartbeatResponse response = blockingUnaryCall(PDGrpc.getStoreHeartbeatMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getClusterStats();
+ }
+
+ /**
+ * 返回Store状态信息
+ */
+ public List getStoreStatus(boolean offlineExcluded) throws PDException {
+ Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+ .setHeader(header)
+ .setExcludeOfflineStores(offlineExcluded)
+ .build();
+ // Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request);
+ Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetStoreStatusMethod(), request);
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ handleResponseError(response.getHeader());
+ List stores = response.getStoresList();
+ return stores;
+ }
+
+ public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException {
+ Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder().setName(graphSpaceName)
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
+ .setStorageLimit(storageLimit)
+ .setTimestamp(System.currentTimeMillis())
+ .build();
+========
+ .setStorageLimit(storageLimit)
+ .setTimestamp(System.currentTimeMillis()).build();
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java
+ Pdpb.SetGraphSpaceRequest request = Pdpb.SetGraphSpaceRequest.newBuilder()
+ .setHeader(header)
+ .setGraphSpace(graphSpace)
+ .build();
+ // Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request);
+ Pdpb.SetGraphSpaceResponse response = blockingUnaryCall(PDGrpc.getSetGraphSpaceMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public List getGraphSpace(String graphSpaceName) throws
+ PDException {
+ Pdpb.GetGraphSpaceRequest.Builder builder = Pdpb.GetGraphSpaceRequest.newBuilder();
+ Pdpb.GetGraphSpaceRequest request;
+ builder.setHeader(header);
+ if (graphSpaceName != null && graphSpaceName.length() > 0) {
+ builder.setGraphSpaceName(graphSpaceName);
+ }
+ request = builder.build();
+ // Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request);
+ Pdpb.GetGraphSpaceResponse response = blockingUnaryCall(PDGrpc.getGetGraphSpaceMethod(), request);
+ List graphSpaceList = response.getGraphSpaceList();
+ handleResponseError(response.getHeader());
+ return graphSpaceList;
+ }
+
+ public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws
+ PDException {
+ Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder()
+ .setPartitionCount(partitionCount)
+ .setPeersList(peerList)
+ .setShardCount(shardCount)
+ .setVersion(version)
+ .setTimestamp(System.currentTimeMillis())
+ .build();
+ Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder()
+ .setHeader(header)
+ .setPdConfig(pdConfig)
+ .build();
+ Pdpb.SetPDConfigResponse response = blockingUnaryCall(PDGrpc.getSetPDConfigMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException {
+ Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder()
+ .setHeader(header)
+ .setPdConfig(pdConfig)
+ .build();
+ Pdpb.SetPDConfigResponse response = blockingUnaryCall(PDGrpc.getSetPDConfigMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public Metapb.PDConfig getPDConfig() throws PDException {
+ Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder()
+ .setHeader(header)
+ .build();
+ Pdpb.GetPDConfigResponse response = blockingUnaryCall(PDGrpc.getGetPDConfigMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getPdConfig();
+ }
+
+ public Metapb.PDConfig getPDConfig(long version) throws PDException {
+ Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder().setHeader(
+ header).setVersion(version).build();
+ // Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request);
+ Pdpb.GetPDConfigResponse response = blockingUnaryCall(PDGrpc.getGetPDConfigMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getPdConfig();
+ }
+
+ public void changePeerList(String peerList) throws PDException {
+ Pdpb.ChangePeerListRequest request = Pdpb.ChangePeerListRequest.newBuilder()
+ .setPeerList(peerList)
+ .setHeader(header).build();
+ Pdpb.ChangePeerListResponse response =
+ blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Split partition data.
+ *
+ * @param mode Auto: split until each store reaches its maximum partition count;
+ * Expert: splitParams must be specified, limited to one store group
+ * @param storeGroupId store group id, 0 is the default group
+ * @param params split params, required in expert mode
+ * @throws PDException when the PD reports an error
+ */
+ public void splitData(ClusterOp.OperationMode mode, int storeGroupId, List params)
+ throws PDException {
+ ClusterOp.SplitDataRequest request = ClusterOp.SplitDataRequest.newBuilder()
+ .setHeader(header)
+ .setMode(mode)
+ .setStoreGroupId(storeGroupId)
+ .addAllParam(params).build();
+ ClusterOp.SplitDataResponse response = blockingUnaryCall(PDGrpc.getSplitDataMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+
+ public void splitGraphData(String graphName, int toCount) throws PDException {
+ ClusterOp.SplitGraphDataRequest request = ClusterOp.SplitGraphDataRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setToCount(toCount)
+ .build();
+ ClusterOp.SplitDataResponse response = blockingUnaryCall(PDGrpc.getSplitGraphDataMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Balance partitions across stores.
+ *
+ * @param mode auto or expert mode
+ * @param storeGroupId store group id, used in auto mode
+ * @param params transfer params, used in expert mode; source and target stores must be
+ * in the same store group
+ * @throws PDException when errors occur
+ */
+ public void balancePartition(ClusterOp.OperationMode mode, int storeGroupId,
+ List params) throws PDException {
+ ClusterOp.MovePartitionRequest request = ClusterOp.MovePartitionRequest.newBuilder()
+ .setHeader(header)
+ .setMode(mode)
+ .setStoreGroupId(storeGroupId)
+ .addAllParam(params)
+ .build();
+ ClusterOp.MovePartitionResponse response = blockingUnaryCall(PDGrpc.getMovePartitionMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException {
+ Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder()
+ .setHeader(header)
+ .setGraphName(graph)
+ .setPartitionId(partId).build();
+ Pdpb.GetPartitionStatsResponse response = blockingUnaryCall(PDGrpc.getGetPartitionStatsMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getPartitionStats();
+ }
+
+ /**
+ * Balance the number of leaders in different stores
+ */
+ public void balanceLeaders() throws PDException {
+ ClusterOp.BalanceLeadersRequest request = ClusterOp.BalanceLeadersRequest.newBuilder()
+ .setHeader(header)
+ .build();
+ ClusterOp.BalanceLeadersResponse response = blockingUnaryCall(PDGrpc.getBalanceLeadersMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Remove the store from the PD
+ */
+ public Metapb.Store delStore(long storeId) throws PDException {
+ Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder()
+ .setHeader(header)
+ .setStoreId(storeId)
+ .build();
+ Pdpb.DetStoreResponse response = blockingUnaryCall(PDGrpc.getDelStoreMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStore();
+ }
+
+ /**
+ * Compaction on rocksdb as a whole
+ *
+ * @throws PDException
+ */
+ public void dbCompaction() throws PDException {
+ ClusterOp.DbCompactionRequest request = ClusterOp.DbCompactionRequest
+ .newBuilder()
+ .setHeader(header)
+ .build();
+ // Pdpb.DbCompactionResponse response = getStub().dbCompaction(request);
+ ClusterOp.DbCompactionResponse response = blockingUnaryCall(PDGrpc.getDbCompactionMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Compaction on rocksdb specified tables
+ *
+ * @param tableName
+ * @throws PDException
+ */
+ public void dbCompaction(String tableName) throws PDException {
+ ClusterOp.DbCompactionRequest request = ClusterOp.DbCompactionRequest
+ .newBuilder()
+ .setHeader(header)
+ .setTableName(tableName)
+ .build();
+ // Pdpb.DbCompactionResponse response = getStub().dbCompaction(request);
+ ClusterOp.DbCompactionResponse response = blockingUnaryCall(PDGrpc.getDbCompactionMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Merge partitions to reduce the current partition to toCount
+ *
+ * @param toCount The number of partitions that can be scaled down
+ * @throws PDException
+ */
+ public void combineCluster(int shardGroupId, int toCount) throws PDException {
+ ClusterOp.CombineClusterRequest request = ClusterOp.CombineClusterRequest
+ .newBuilder()
+ .setHeader(header)
+ .setStoreGroupId(shardGroupId)
+ .setToCount(toCount)
+ .build();
+ ClusterOp.CombineClusterResponse response = blockingUnaryCall(PDGrpc.getCombineClusterMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Scaling a single graph down to toCount is similar to splitting, to ensure that the
+ * number of partitions in the same store group is the same.
+ * If you have special requirements, you can consider migrating to other groups
+ *
+ * @param graphName graph name
+ * @param toCount target count
+ * @throws PDException
+ */
+ public void combineGraph(String graphName, int toCount) throws PDException {
+ ClusterOp.CombineGraphRequest request = ClusterOp.CombineGraphRequest
+ .newBuilder()
+ .setHeader(header)
+ .setGraphName(graphName)
+ .setToCount(toCount)
+ .build();
+ ClusterOp.CombineGraphResponse response = blockingUnaryCall(PDGrpc.getCombineGraphMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public void deleteShardGroup(int groupId) throws PDException {
+ Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest
+ .newBuilder()
+ .setHeader(header)
+ .setGroupId(groupId)
+ .build();
+ Pdpb.DeleteShardGroupResponse response =
+ blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * Used for the store's shard list rebuild
+ *
+ * @param groupId shard group id
+ * @param shards shard list, delete when shards size is 0
+ */
+ public void updateShardGroupOp(int groupId, List shards) throws PDException {
+ ClusterOp.ChangeShardRequest request = ClusterOp.ChangeShardRequest.newBuilder()
+ .setHeader(header)
+ .setGroupId(groupId)
+ .addAllShards(shards)
+ .build();
+ ClusterOp.ChangeShardResponse response = blockingUnaryCall(PDGrpc.getUpdateShardGroupOpMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ /**
+ * invoke fireChangeShard command
+ *
+ * @param groupId shard group id
+ * @param shards shard list
+ */
+ public void changeShard(int groupId, List shards) throws PDException {
+ ClusterOp.ChangeShardRequest request = ClusterOp.ChangeShardRequest.newBuilder()
+ .setHeader(header)
+ .setGroupId(groupId)
+ .addAllShards(shards)
+ .build();
+ ClusterOp.ChangeShardResponse response = blockingUnaryCall(PDGrpc.getChangeShardMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public ClientCache getCache() {
+ return cache;
+ }
+
+ public Pdpb.CacheResponse getClientCache() throws PDException {
+ Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(header).build();
+ Pdpb.CacheResponse cache = blockingUnaryCall(PDGrpc.getGetCacheMethod(), request);
+ handleResponseError(cache.getHeader());
+ return cache;
+ }
+
+ public Pdpb.CachePartitionResponse getPartitionCache(String graph) throws PDException {
+ Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build();
+ Pdpb.CachePartitionResponse ps = blockingUnaryCall(PDGrpc.getGetPartitionsMethod(), request);
+ handleResponseError(ps.getHeader());
+ return ps;
+ }
+
+ public void updatePdRaft(String raftConfig) throws PDException {
+ ClusterOp.UpdatePdRaftRequest request = ClusterOp.UpdatePdRaftRequest.newBuilder()
+ .setHeader(header)
+ .setConfig(raftConfig)
+ .build();
+ // Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request);
+ ClusterOp.UpdatePdRaftResponse response = blockingUnaryCall(PDGrpc.getUpdatePdRaftMethod(), request);
+ handleResponseError(response.getHeader());
+ }
+
+ public long submitBuildIndexTask(Metapb.BuildIndexParam param) throws PDException {
+ Pdpb.IndexTaskCreateRequest request = Pdpb.IndexTaskCreateRequest.newBuilder()
+ .setHeader(header)
+ .setParam(param)
+ .build();
+ // var response = getStub().submitTask(request);
+ var response = blockingUnaryCall(PDGrpc.getSubmitIndexTaskMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getTaskId();
+ }
+
+ public long submitBackupGraphTask(String sourceGraph, String targetGraph) throws PDException {
+ Pdpb.BackupGraphRequest request = Pdpb.BackupGraphRequest.newBuilder()
+ .setGraphName(sourceGraph)
+ .setTargetGraphName(targetGraph)
+ .build();
+ // var response = getStub().submitTask(request);
+ var response = blockingUnaryCall(PDGrpc.getSubmitBackupGraphTaskMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getTaskId();
+ }
+
+ public Pdpb.TaskQueryResponse queryBuildIndexTaskStatus(long taskId) throws PDException {
+ Pdpb.TaskQueryRequest request = Pdpb.TaskQueryRequest.newBuilder()
+ .setHeader(header)
+ .setTaskId(taskId)
+ .build();
+ // var response = getStub().queryTaskState(request);
+ var response = blockingUnaryCall(PDGrpc.getQueryTaskStateMethod(), request);
+ handleResponseError(response.getHeader());
+ return response;
+ }
+
+ public Pdpb.TaskQueryResponse retryTask(long taskId) throws PDException {
+ Pdpb.TaskQueryRequest request = Pdpb.TaskQueryRequest.newBuilder()
+ .setHeader(header)
+ .setTaskId(taskId)
+ .build();
+ // var response = getStub().retryIndexTask(request);
+ var response = blockingUnaryCall(PDGrpc.getRetryTaskMethod(), request);
+ handleResponseError(response.getHeader());
+ return response;
+ }
+
+ public Pdpb.GraphStatsResponse getGraphStats(String graphName) throws PDException {
+ Pdpb.GetGraphRequest request =
+ Pdpb.GetGraphRequest.newBuilder().setHeader(header).setGraphName(graphName).build();
+ // Pdpb.GraphStatsResponse graphStats = getStub().getGraphStats(request);
+ Pdpb.GraphStatsResponse graphStats = blockingUnaryCall(PDGrpc.getGetGraphStatsMethod(), request);
+ handleResponseError(graphStats.getHeader());
+ return graphStats;
+ }
+
+ /**
+ * Returns the info of all partitions spanned by startKey and endKey
+ *
+ * @param graphName
+ * @param startKey
+ * @param endKey
+ * @return
+ * @throws PDException
+ */
+ public List> scanPartitions(String graphName, byte[] startKey,
+ byte[] endKey) throws PDException {
+ List> partitions = new ArrayList<>();
+ KVPair startPartShard = getPartition(graphName, startKey);
+ KVPair endPartShard = getPartition(graphName, endKey);
+ if (startPartShard == null || endPartShard == null) {
+ return null;
+ }
+ partitions.add(startPartShard);
+ while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey()
+ && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE /* exclude the last partition */) {
+ startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey());
+ partitions.add(startPartShard);
+ }
+ return partitions;
+ }
+
+ /**
+ * Query the partition info that the key belongs to
+ *
+ * @param graphName
+ * @param key
+ * @return
+ * @throws PDException
+ */
+ public KVPair getPartition(String graphName, byte[] key) throws PDException {
+ // Check the cache first; call PD only on a cache miss
+ KVPair partShard = cache.getPartitionByKey(graphName, key);
+ partShard = getKvPair(graphName, key, partShard);
+ return partShard;
+ }
+
+ public KVPair getPartition(String graphName, byte[] key, int code) throws PDException {
+ KVPair partShard = cache.getPartitionByCode(graphName, code);
+ partShard = getKvPair(graphName, key, partShard);
+ return partShard;
+ }
+
+ /**
+ * Called by hugegraph-store to update the cache
+ *
+ * @param partition
+ */
+ public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) {
+ if (config.isEnableCache()) {
+ cache.update(partition.getGraphName(), partition.getId(), partition);
+ cache.updateLeader(partition.getId(), leader);
+ }
+ }
+
+ /**
+ * Called by hugegraph-server when the leader changes, to update the cache
+ */
+ public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) {
+ KVPair partShard = null;
+ try {
+ partShard = this.getPartitionById(graphName, partId);
+
+ if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) {
+ var shardGroup = this.getShardGroup(partId);
+ Metapb.Shard shard = null;
+ List shards = new ArrayList<>();
+
+ for (Metapb.Shard s : shardGroup.getShardsList()) {
+ if (s.getStoreId() == leaderStoreId) {
+ shard = s;
+ shards.add(Metapb.Shard.newBuilder(s)
+ .setStoreId(s.getStoreId())
+ .setRole(Metapb.ShardRole.Leader).build());
+ } else {
+ shards.add(Metapb.Shard.newBuilder(s)
+ .setStoreId(s.getStoreId())
+ .setRole(Metapb.ShardRole.Follower).build());
+ }
+ }
+
+ if (config.isEnableCache()) {
+ if (shard == null) {
+ // Leader not found among the partition's shards: the partition has been migrated
+ cache.removePartition(graphName, partId);
+ } else {
+ cache.updateLeader(partId, shard);
+ }
+ }
+ }
+ } catch (PDException e) {
+ log.error("getPartitionException: {}", e.getMessage());
+ }
+ }
+
+ public Metapb.StoreGroup createStoreGroup(int groupId, String name, int partitionCount) throws PDException {
+ StoreGroup.CreateStoreGroupRequest request = StoreGroup.CreateStoreGroupRequest.newBuilder()
+ .setHeader(header)
+ .setGroupId(groupId)
+ .setName(name)
+ .setPartitionCount(partitionCount)
+ .build();
+
+ StoreGroup.CreateStoreGroupResponse response = blockingUnaryCall(PDGrpc.getCreateStoreGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoreGroup();
+ }
+
+ public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException {
+ StoreGroup.GetStoreGroupRequest request = StoreGroup.GetStoreGroupRequest.newBuilder()
+ .setHeader(header)
+ .setGroupId(groupId)
+ .build();
+ StoreGroup.GetStoreGroupResponse response = blockingUnaryCall(PDGrpc.getGetStoreGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoreGroup();
+ }
+
+ public List getAllStoreGroups() throws PDException {
+ StoreGroup.GetAllStoreGroupRequest request = StoreGroup.GetAllStoreGroupRequest.newBuilder()
+ .setHeader(header).build();
+ StoreGroup.GetAllStoreGroupResponse response = blockingUnaryCall(PDGrpc.getGetAllStoreGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoreGroupsList();
+ }
+
+ public Metapb.StoreGroup updateStoreGroup(int groupId, String name) throws PDException {
+ StoreGroup.UpdateStoreGroupRequest request = StoreGroup.UpdateStoreGroupRequest.newBuilder().setHeader(header)
+ .setGroupId(groupId)
+ .setName(name)
+ .build();
+ StoreGroup.UpdateStoreGroupResponse response = blockingUnaryCall(PDGrpc.getUpdateStoreGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoreGroup();
+ }
+
+ public List getStoresByStoreGroup(int groupId) throws PDException {
+ StoreGroup.GetGroupStoresRequest request = StoreGroup.GetGroupStoresRequest.newBuilder()
+ .setHeader(header).setStoreGroupId(groupId).build();
+ StoreGroup.GetGroupStoresResponse response = blockingUnaryCall(PDGrpc.getGetStoresByStoreGroupMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getStoresList();
+ }
+
+ public boolean updateStoreGroupRelation(long storeId, int groupId) throws PDException {
+ StoreGroup.UpdateStoreGroupRelationRequest request = StoreGroup.UpdateStoreGroupRelationRequest.newBuilder()
+ .setHeader(header).setStoreId(storeId)
+ .setStoreGroupId(groupId).build();
+ var response = blockingUnaryCall(PDGrpc.getUpdateStoreGroupRelationMethod(), request);
+ handleResponseError(response.getHeader());
+ return response.getSuccess();
+ }
+
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java
new file mode 100644
index 0000000000..aace7d144c
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java
@@ -0,0 +1,213 @@
+package org.apache.hugegraph.pd.client.impl;
+
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.client.rpc.Invoker;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDRuntimeException;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+
+import io.grpc.MethodDescriptor;
+import io.grpc.stub.StreamObserver;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class StreamDelegator {
+
+ private final Invoker invoker;
+ private final MethodDescriptor methodDesc;
+ private final AtomicReference requestHolder = new AtomicReference<>();
+ private final AtomicReference state =
+ new AtomicReference<>(StreamDelegatorState.IDLE);
+ private final AtomicReference senderHolder = new AtomicReference<>();
+ private final AtomicReference actionHolder = new AtomicReference<>();
+ private final AtomicBoolean connecting = new AtomicBoolean();
+ private final AtomicBoolean autoReconnect = new AtomicBoolean(true);
+ private final AtomicReference lastError = new AtomicReference<>();
+ @Getter
+ private final String name;
+ private Consumer dataHandler = this::defaultHandler;
+ private Consumer errorHandler = this::defaultHandler;
+ private Consumer completeHandler = this::defaultHandler;
+
+ public StreamDelegator(String delegatorName, Invoker invoker, MethodDescriptor methodDesc) {
+ HgAssert.isArgumentValid(delegatorName, "delegator name");
+ HgAssert.isArgumentNotNull(invoker, "stub invoker");
+ HgAssert.isArgumentNotNull(methodDesc, "methodDesc");
+ this.name = delegatorName;
+ this.invoker = invoker;
+ this.methodDesc = methodDesc;
+ }
+
+ public void close() {
+ StreamDelegatorSender sender = this.senderHolder.get();
+ if (sender != null) {
+ sender.close();
+ }
+ }
+
+ private void defaultHandler(T t) {
+ if (t instanceof Throwable) {
+ error("Default handler received an error:", t);
+ this.lastError.set((Throwable) t);
+ } else if (t != null) {
+ info("Default handler received a stream data: {}", t);
+ }
+ }
+
+ public void listen(ReqT request, Consumer dataHandler) {
+ if (!this.state.compareAndSet(StreamDelegatorState.IDLE, StreamDelegatorState.LISTENING)) {
+ info("It's not in the idle StreamDelegatorState, skip listening.", new Object[0]);
+ throw new IllegalStateException(
+ "It's not in the idle StreamDelegatorState and not via the 'listening' method. ");
+ }
+ if (!this.requestHolder.compareAndSet(null, request)) {
+ info("Already connected, skip listening.", new Object[0]);
+ throw new IllegalStateException("Already connected, UNKNOWN StreamDelegatorState!");
+ }
+ this.dataHandler = dataHandler;
+ try {
+ listen2Server(request, new StreamDelegatorReceiver<>(this));
+ } catch (RuntimeException e) {
+ this.requestHolder.set(null);
+ this.state.set(StreamDelegatorState.IDLE);
+ throw e;
+ }
+ }
+
+ public StreamDelegatorSender link(Consumer dataHandler) {
+ HgAssert.isArgumentNotNull(dataHandler, "data handler");
+ if (!this.state.compareAndSet(StreamDelegatorState.IDLE, StreamDelegatorState.LINKING)) {
+ info("It's not in the idle StreamDelegatorState, skip linking.", new Object[0]);
+ throw new IllegalStateException(
+ "It's not in the idle StreamDelegatorState and not via the 'linking' method.");
+ }
+ if (this.senderHolder.get() != null) {
+ info("Already connected, skip linking.", new Object[0]);
+ return this.senderHolder.get();
+ }
+ if (!this.senderHolder.compareAndSet(null, new StreamDelegatorSender<>(this))) {
+ info("Already connected, skip linking.", new Object[0]);
+ return this.senderHolder.get();
+ }
+ this.dataHandler = dataHandler;
+ try {
+ this.senderHolder.get().setReqStream(this, link2Server(new StreamDelegatorReceiver<>(this)));
+ } catch (Exception e) {
+ this.senderHolder.set(null);
+ this.state.set(StreamDelegatorState.IDLE);
+ throw e;
+ }
+ return this.senderHolder.get();
+ }
+
+ private StreamObserver link2Server(StreamObserver receiver) {
+ try {
+ return this.invoker.streamingCall(this.methodDesc, receiver);
+ } catch (Exception e) {
+ error("Failed to establish a link to the server, method type: {}, caused by: ", methodDesc, e);
+ throw new PDRuntimeException(ErrorType.ERROR_VALUE, e);
+ }
+ }
+
+ private void listen2Server(ReqT request, StreamObserver receiver) {
+ try {
+ this.invoker.serverStreamingCall(this.methodDesc, request, receiver);
+ } catch (Exception e) {
+ error("Failed to set up a listening connection to the server, method type: {}, caused by: ",
+ methodDesc, e);
+ throw new PDRuntimeException(ErrorType.ERROR_VALUE, e);
+ }
+ }
+
+ public void reconnect() {
+ reconnect(null);
+ }
+
+ public void reconnect(Throwable t) {
+ if (this.connecting.compareAndSet(false, true)) {
+ if (t != null) {
+ log.warn("Received an error and trying to reconnect: ", t);
+ }
+ try {
+ AtomicBoolean connected = new AtomicBoolean(false);
+ int count = 0;
+ while (!connected.get()) {
+ try {
+ count++;
+ StreamDelegatorSender sender = this.senderHolder.get();
+ ReqT request = this.requestHolder.get();
+ if (sender == null && request == null) {
+ info("The sender and request are both null, skip reconnecting.");
+ return;
+ }
+ if (sender != null) {
+ info("The [{}]th attempt to [linking]...", count);
+ sender.updateReqStream(link2Server(new StreamDelegatorReceiver<>(this)));
+ } else {
+ info("The [{}]th attempt to [listening]...", count);
+ listen2Server(request, new StreamDelegatorReceiver<>(this));
+ }
+ connected.set(true);
+ break;
+ } catch (Exception e) {
+ try {
+ error("Failed to reconnect, waiting [{}] seconds for the next attempt.", 3);
+ connected.set(false);
+ Thread.sleep(3000L);
+ } catch (InterruptedException ex) {
+ error("Failed to sleep thread and cancel the reconnecting process.", e);
+ }
+ }
+ }
+ if (connected.get()) {
+ info("Reconnect server successfully!");
+ } else {
+ error("Reconnect server failed!");
+ }
+ } catch (Exception e) {
+ warn("Failed to reconnect:", e);
+ } finally {
+ this.connecting.set(false);
+ }
+ }
+ }
+
+ protected void onNext(RespT res) {
+ this.dataHandler.accept(res);
+ }
+
+ protected void onError(Throwable t) {
+ if (this.autoReconnect.get()) {
+ this.invoker.reconnect();
+ } else {
+ log.warn(this.name + " received an error and trying to reconnect: ", t);
+ }
+ }
+
+ protected void onCompleted() {
+ this.completeHandler.accept(null);
+ }
+
+ protected void resetState() {
+ this.senderHolder.set(null);
+ this.requestHolder.set(null);
+ this.state.set(StreamDelegatorState.IDLE);
+ }
+
+ protected void info(String msg, Object... args) {
+ log.info("[" + this.name + "] " + msg, args);
+ }
+
+ protected void error(String msg, Object... args) {
+ log.error("[" + this.name + "] " + msg, args);
+ }
+
+ protected void warn(String msg, Object... args) {
+ log.warn("[" + this.name + "] " + msg, args);
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java
new file mode 100644
index 0000000000..371cdcd9fb
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java
@@ -0,0 +1,31 @@
+package org.apache.hugegraph.pd.client.impl;
+
+import java.util.concurrent.ExecutorService;
+
+import org.apache.hugegraph.pd.client.support.PDExecutors;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class StreamDelegatorReceiver implements StreamObserver {
+
+ private final StreamDelegator delegator;
+ private static ExecutorService connectExecutor =
+ PDExecutors.newDiscardPool("on-error", 8, 8, Integer.MAX_VALUE);
+ public StreamDelegatorReceiver(StreamDelegator delegator) {
+ this.delegator = delegator;
+ }
+
+ public void onNext(RespT res) {
+ this.delegator.onNext(res);
+ }
+
+ public void onError(Throwable t) {
+ connectExecutor.submit(() -> this.delegator.onError(t));
+ }
+
+ public void onCompleted() {
+ this.delegator.onCompleted();
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java
new file mode 100644
index 0000000000..0502a4998f
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java
@@ -0,0 +1,99 @@
+package org.apache.hugegraph.pd.client.impl;
+
+import java.io.Closeable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDRuntimeException;
+
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class StreamDelegatorSender implements Closeable {
+
+ private final AtomicBoolean isClosed = new AtomicBoolean(true);
+ private AtomicReference> observer = new AtomicReference<>();
+ private Consumer reconnectedConsumer;
+ private StreamDelegator delegator;
+
+ public StreamDelegatorSender(StreamDelegator delegator) {
+ this.delegator = delegator;
+ }
+
+ protected StreamDelegatorSender setReqStream(StreamDelegator delegator,
+ StreamObserver reqStream) {
+ this.delegator = delegator;
+ this.observer.set(reqStream);
+ this.isClosed.set(false);
+ return this;
+ }
+
+ protected StreamDelegatorSender updateReqStream(StreamObserver reqStream) {
+ complete();
+ this.observer.set(reqStream);
+ this.isClosed.set(false);
+ reconnect(null);
+ return this;
+ }
+
+ private void reconnect(Void e) {
+ if (this.reconnectedConsumer != null) {
+ try {
+ this.reconnectedConsumer.accept(e);
+ } catch (Exception ex) {
+ log.error("Failed to invoke [ reconnectedConsumer ], caused by: ", ex);
+ }
+ } else {
+ log.info("Received a reconnection complete event.");
+ }
+ }
+
+ public void onReconnected(Consumer reconnectedConsumer) {
+ HgAssert.isArgumentNotNull(reconnectedConsumer, "reconnectedConsumer");
+ this.reconnectedConsumer = reconnectedConsumer;
+ }
+
+ public void send(ReqT t) {
+ HgAssert.isArgumentNotNull(t, "request");
+ try {
+ this.observer.get().onNext(t);
+ } catch (Throwable e) {
+ log.error("Failed to send to server, caused by: ", e);
+ this.delegator.reconnect();
+ throw new PDRuntimeException(-1, e);
+ }
+ }
+
+ public void error(String error) {
+ if (!this.isClosed.compareAndSet(false, true)) {
+ log.warn("Aborted sending the error due the closure of the connection.");
+ return;
+ }
+ this.delegator.resetState();
+ Throwable t = new Throwable(error);
+ log.error("Sender failed to invoke [onError], caused by: ", t);
+ this.observer.get().onError(t);
+ }
+
+ public void close() {
+ this.delegator.resetState();
+ if (!this.isClosed.compareAndSet(false, true)) {
+ return;
+ }
+ complete();
+ }
+
+ protected void complete() {
+ try {
+ StreamObserver observer = this.observer.get();
+ if (observer != null) {
+ observer.onCompleted();
+ }
+ } catch (Throwable e) {
+ log.error("Sender failed to invoke [onCompleted], caused by: ", e);
+ }
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java
new file mode 100644
index 0000000000..62bffca6d6
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java
@@ -0,0 +1,9 @@
+package org.apache.hugegraph.pd.client.impl;
+
+/**
+ * @author zhangyingjie
+ * @date 2024/1/31
+ **/
+public enum StreamDelegatorState {
+ IDLE, LINKING, LISTENING, PUSHING;
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java
new file mode 100644
index 0000000000..3d84674118
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java
@@ -0,0 +1,72 @@
+package org.apache.hugegraph.pd.client.interceptor;
+
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.hugegraph.pd.client.rpc.ConnectionManagers;
+import org.apache.hugegraph.pd.common.Cache;
+import org.apache.hugegraph.pd.common.Consts;
+
+import io.grpc.CallOptions;
+import io.grpc.Channel;
+import io.grpc.ClientCall;
+import io.grpc.ClientInterceptor;
+import io.grpc.ForwardingClientCall.SimpleForwardingClientCall;
+import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener;
+import io.grpc.Metadata;
+import io.grpc.MethodDescriptor;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/4/25
+ **/
+@Slf4j
+public class Authentication implements ClientInterceptor {
+
+ private static Cache cache = new Cache();
+ private static long ttl = 3600L;
+ private String authority;
+ private String name;
+
+ public Authentication(String userName, String authority) {
+ assert !StringUtils.isEmpty(userName);
+ this.name = userName;
+ this.authority = authority;
+ }
+
+ public ClientCall interceptCall(MethodDescriptor method,
+ CallOptions callOptions, Channel next) {
+ return new SimpleForwardingClientCall<>(next.newCall(method, callOptions)) {
+ public void sendMessage(ReqT message) {
+ super.sendMessage(message);
+ }
+
+ public void start(Listener listener, Metadata headers) {
+ if (StringUtils.isEmpty(authority) ||
+ StringUtils.isEmpty(name)) {
+ throw new RuntimeException("invalid user name or password, access denied");
+ }
+ headers.put(Consts.CREDENTIAL_KEY, authority);
+ String token = cache.get(name);
+ if (token != null) {
+ headers.put(Consts.TOKEN_KEY, cache.get(name));
+ }
+ SimpleForwardingClientCallListener callListener =
+ new SimpleForwardingClientCallListener(listener) {
+ public void onHeaders(Metadata headers) {
+ super.onHeaders(headers);
+ String t = headers.get(Consts.TOKEN_KEY);
+ if (!StringUtils.isEmpty(t)) {
+ cache.put(name, t, ttl);
+ }
+ String leader = headers.get(Consts.LEADER_KEY);
+ if (!StringUtils.isEmpty(leader)) {
+ ConnectionManagers.getInstance().reset(leader);
+ }
+ }
+ };
+ super.start(callListener, headers);
+ }
+ };
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java
new file mode 100644
index 0000000000..5eb26a78d5
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java
@@ -0,0 +1,16 @@
+package org.apache.hugegraph.pd.client.interceptor;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/8/7
+ **/
+public class AuthenticationException extends RuntimeException{
+
+ public AuthenticationException(String msg) {
+ super(msg);
+ }
+
+ public AuthenticationException(String msg, Throwable cause) {
+ super(msg, cause);
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java
new file mode 100644
index 0000000000..527916ec1a
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java
@@ -0,0 +1,16 @@
+package org.apache.hugegraph.pd.client.listener;
+
+import org.apache.hugegraph.pd.client.rpc.ConnectionManagers;
+
+/**
+ * @author zhangyingjie
+ * @date 2024/1/31
+ **/
+public interface LeaderChangeListener {
+ void onLeaderChanged(String leaderAddress);
+
+ default void onPeerChanged(String[] peers) {
+ ConnectionManagers managers = ConnectionManagers.getInstance();
+ managers.resetPeers(peers);
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java
new file mode 100644
index 0000000000..e83f56678c
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java
@@ -0,0 +1,20 @@
+package org.apache.hugegraph.pd.client.listener;
+
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+
+/**
+ * @author zhangyingjie
+ * @date 2023/9/14
+ **/
+public interface PDEventListener {
+ void onStoreChanged(NodeEvent event);
+
+ void onPartitionChanged(PartitionEvent event);
+
+ void onGraphChanged(WatchResponse event);
+
+ default void onShardGroupChanged(WatchResponse event) {
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java
new file mode 100644
index 0000000000..4abb313746
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java
@@ -0,0 +1,88 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import org.apache.hugegraph.pd.client.interceptor.Authentication;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDException;
+
+import io.grpc.Channel;
+import io.grpc.ClientCall;
+import io.grpc.MethodDescriptor;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.ClientCalls;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * @author lynn.bond@hotmail.com on 2023/12/21
+ */
+@Slf4j
+public class AnyInvoker extends Invoker {
+
+ public AnyInvoker(ConnectionManager cm, Function asyncStubCreator,
+ Function blockingStubCreator) {
+ super(cm, asyncStubCreator, blockingStubCreator);
+ }
+
+ private ClientCall newBlockingClientCall(MethodDescriptor method,
+ Channel channel) {
+ Authentication auth = new Authentication(config.getUserName(), config.getAuthority());
+ return auth.interceptCall(method, getBlockingCallOptions(), channel);
+ }
+
+ public RespT blockingCall(MethodDescriptor method, ReqT req) throws
+ PDException {
+ return blockingCall(method, req, resp -> true);
+ }
+
+ public RespT blockingCall(MethodDescriptor method, ReqT req,
+ long timeout) throws PDException {
+ HgAssert.isArgumentNotNull(method, "method");
+ HgAssert.isArgumentNotNull(req, "request");
+ HgAssert.isTrue((timeout >= 0L), "timeout must be non-negative");
+ return parallelCall(
+ c -> ClientCalls.blockingUnaryCall(c, method, getBlockingCallOptions(timeout), req),
+ resp -> true);
+ }
+
+ public RespT blockingCall(MethodDescriptor method, ReqT req,
+ Predicate predicate) throws PDException {
+ HgAssert.isArgumentNotNull(predicate, "Predicate");
+ return parallelCall(c -> ClientCalls.blockingUnaryCall(c, method, getBlockingCallOptions(), req),
+ predicate);
+ }
+
+ public void serverStreamingCall(MethodDescriptor methodDescriptor, ReqT request,
+ StreamObserver responseObserver) throws PDException {
+ throw new UnsupportedOperationException("Not support server streaming call");
+ }
+
+ public StreamObserver streamingCall(MethodDescriptor method,
+ StreamObserver responseObserver) throws
+ PDException {
+ throw new UnsupportedOperationException("Not support streaming call");
+ }
+
+ private T parallelCall(Function mapper, Predicate predicate) throws PDException {
+ return this.cm.getParallelChannelStream()
+ .map(errorShutdown(mapper))
+ .filter(Objects::nonNull)
+ .filter(predicate)
+ .findAny()
+ .orElse(null);
+ }
+
+ private Function errorShutdown(Function mapper) throws PDException {
+ return channel -> {
+ try {
+ return mapper.apply(channel);
+ } catch (Exception exception) {
+ return null;
+ }
+ };
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java
new file mode 100644
index 0000000000..a5369be3e3
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java
@@ -0,0 +1,193 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import org.apache.hugegraph.pd.client.PDConfig;
+
+import io.grpc.Channel;
+import io.grpc.ConnectivityState;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class Channels {
+ private static ConcurrentHashMap channels = new ConcurrentHashMap<>();
+
+ /**
+ * Retrieve a channel with a specific target. If the channel is null or shutdown, create a new one;
+ *
+ * @param target
+ * @return
+ */
+ public static ManagedChannel getChannel(String target) {
+ ManagedChannel channel = channels.get(target);
+ if (!isValidChannel(channel)) {
+ synchronized (channels) {
+ channel = channels.get(target);
+ if (!isValidChannel(channel)) {
+ if (channel != null) {
+ log.info("get channel {}, state:{}", channel, channel.getState(false));
+ }
+ channel = resetChannel(target, channel);
+ }
+ }
+ }
+ return channel;
+ }
+
+ private static ManagedChannel resetChannel(String target, ManagedChannel channel) {
+ closeChannel(channel);
+ channel = ManagedChannelBuilder.forTarget(target)
+ .maxInboundMessageSize(PDConfig.getInboundMessageSize())
+ .usePlaintext().build();
+ channels.put(target, channel);
+ log.info("Because the channel is not available, create a new one for {}", target);
+ return channel;
+ }
+
+ /**
+ * Validate whether the channel is valid.
+ *
+ * @param channel
+ * @return true if the channel is valid, otherwise false.
+ */
+ public static boolean isValidChannel(ManagedChannel channel) {
+ if (channel == null || channel.isShutdown() || channel.isTerminated()) {
+ return false;
+ }
+ ConnectivityState state = channel.getState(false);
+ if (state == ConnectivityState.READY || state == ConnectivityState.IDLE) {
+ /* Optimistic judgment for increasing the efficiency. */
+ return true;
+ }
+ /* Trying to make a connection. */
+ state = channel.getState(true);
+ if (state == ConnectivityState.IDLE || state == ConnectivityState.READY) {
+ return true;
+ } else {
+ // log.info("Channel {} is invalid, state: {}", channel, state);
+ return false;
+ }
+ }
+
+ /**
+ * Return true if the channel io is broken and need to shut down now.
+ *
+ * @param throwable
+ * @return
+ */
+ public static boolean isIoBrokenError(Throwable throwable) {
+ if (throwable instanceof StatusRuntimeException) {
+ StatusRuntimeException e = (StatusRuntimeException) throwable;
+ return e.getStatus().getCode() == Status.Code.UNAVAILABLE;
+ }
+
+ return false;
+ }
+
+ public static boolean canNotWork(Throwable throwable) {
+ if (throwable instanceof StatusRuntimeException) {
+ StatusRuntimeException e = (StatusRuntimeException) throwable;
+ Status.Code code = e.getStatus().getCode();
+ return code == Status.Code.UNAVAILABLE || code == Status.Code.DEADLINE_EXCEEDED;
+ }
+ return false;
+ }
+ /**
+ * Retrieves all channels
+ *
+ * @return non-null collection
+ */
+ public static List getAllChannels() {
+ return channels.values().stream().collect(Collectors.toList());
+ }
+
+ /**
+ * Closing all channels
+ *
+ * @return
+ */
+ public static boolean closeAllChannels() {
+ /* Clone the list to avoid closing the new channels. */
+ List buff = new ArrayList<>(channels.values());
+ return buff.stream().parallel().allMatch(Channels::closeChannel);
+ }
+
+ /**
+ * Closing a channel.
+ *
+ * @param channel
+ * @return
+ */
+ public static boolean closeChannel(ManagedChannel channel) {
+ if (channel == null || channel.isShutdown() || channel.isTerminated()) {
+ return true;
+ }
+ log.info("Closing the channel: {}", channel);
+ try {
+ channel.shutdown().awaitTermination(1000, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ log.warn("Failed to close channel, caused by InterruptedException:", e);
+ //Thread.currentThread().interrupt();
+ return false;
+ }
+ while (!channel.isShutdown()) {
+ try {
+ log.info("Waiting for channel to be shutdown: {}", channel);
+ TimeUnit.MILLISECONDS.sleep(1000);
+ } catch (InterruptedException e) {
+ log.warn("Failed to close channel, caused by InterruptedException:", e);
+ //Thread.currentThread().interrupt();
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Invoking shutdownNow on a channel and waiting until the timeout occurs.
+ * If the channel is not a ManagedChannel, return directly with no action.
+ *
+ * @param channel
+ * @param timeout timeout in milliseconds
+ */
+ public static void shutdownNow(Channel channel, long timeout) {
+ if (channel == null) {
+ return;
+ }
+
+ if (!(channel instanceof ManagedChannel)) {
+ log.info("Channel is not a ManagedChannel, return.");
+ return;
+ }
+
+ ManagedChannel managedChannel = (ManagedChannel) channel;
+ if (managedChannel.isShutdown() || managedChannel.isTerminated()) {
+ return;
+ }
+
+ log.info("Shutting down the channel: {}", channel);
+
+ try {
+ managedChannel.shutdownNow().awaitTermination(timeout, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ log.warn("Failed to shutdown channel, caused by InterruptedException:", e);
+ //Thread.currentThread().interrupt();
+ return;
+ }
+
+ }
+
+ public static boolean isShutdown(ManagedChannel channel) {
+ if (channel == null || channel.isShutdown() || channel.isTerminated())
+ return true;
+ return false;
+ }
+}
\ No newline at end of file
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java
new file mode 100644
index 0000000000..17ead2e61b
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java
@@ -0,0 +1,44 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import org.apache.hugegraph.pd.client.BaseClient;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.common.NoArg;
+
+/**
+ * @author zhangyingjie
+ * @date 2024/1/31
+ **/
+public class ConnectionClient extends BaseClient {
+ public ConnectionClient(PDConfig pdConfig) {
+ super(pdConfig, PDGrpc::newStub, PDGrpc::newBlockingStub);
+ }
+
+ public Pdpb.CacheResponse getClientCache() throws PDException {
+ Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(this.header).build();
+ Pdpb.CacheResponse cache = blockingUnaryCall(PDGrpc.getGetCacheMethod(), request);
+ handleErrors(cache.getHeader());
+ return cache;
+ }
+
+ public Pdpb.CachePartitionResponse getPartitionCache(String graph) throws PDException {
+ Pdpb.GetGraphRequest request =
+ Pdpb.GetGraphRequest.newBuilder().setHeader(this.header).setGraphName(graph).build();
+ Pdpb.CachePartitionResponse ps = blockingUnaryCall(PDGrpc.getGetPartitionsMethod(), request);
+ handleErrors(ps.getHeader());
+ return ps;
+ }
+
+ public Pdpb.GetAllGrpcAddressesResponse getPdAddressesCache() throws PDException {
+ NoArg request = NoArg.newBuilder().build();
+ Pdpb.GetAllGrpcAddressesResponse response =
+ blockingUnaryCall(PDGrpc.getGetAllGrpcAddressesMethod(), request);
+ handleErrors(response.getHeader());
+ return response;
+ }
+
+ public void onLeaderChanged(String leader) {
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java
new file mode 100644
index 0000000000..0d8fbeac71
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java
@@ -0,0 +1,351 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
+import java.util.stream.Stream;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.hugegraph.pd.client.BaseClient;
+import org.apache.hugegraph.pd.client.ClientCache;
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.client.PulseClient;
+import org.apache.hugegraph.pd.client.interceptor.Authentication;
+import org.apache.hugegraph.pd.client.support.PDExecutors;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDRuntimeException;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+import org.apache.hugegraph.pd.grpc.common.NoArg;
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.Watcher;
+import org.apache.hugegraph.pd.watch.WatcherImpl;
+
+import io.grpc.Channel;
+import io.grpc.ManagedChannel;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import lombok.Getter;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Central manager of the grpc connectivity between a PD client and one PD
+ * cluster: tracks the current leader address and its channel, performs the
+ * reconnect when the leader moves, and hands out stubs/channels to the
+ * BaseClient instances registered with it.
+ *
+ * NOTE(review): generic type parameters appear to have been stripped from this
+ * patch (e.g. "Set clients", "AtomicReference leader", "Future> future") —
+ * verify against the upstream file.
+ *
+ * @author lynn.bond@hotmail.com on 2023/11/20
+ * @version 1.0
+ */
+@Slf4j
+@ThreadSafe
+public final class ConnectionManager {
+
+ // Max seconds a caller waits for a concurrent reconnect to finish.
+ static final long WAITING_CREATE_SECONDS = 60L;
+ // Number of one-second-spaced rounds spent polling the cluster for a leader.
+ static final long WAITING_LEADER_SECONDS = 30L;
+ // Per-call deadline (seconds) used while probing hosts for the leader address.
+ static final long TIMEOUT_SECONDS = 30L;
+ private static String emptyMsg = "Failed to get leader after " + WAITING_LEADER_SECONDS + " attempts";
+ private static NoArg noArg = NoArg.newBuilder().build();
+ @Getter
+ private final PDConfig config;
+ // Clients notified (via onLeaderChanged) after the leader moved.
+ private final Set clients = ConcurrentHashMap.newKeySet();
+ private final ExecutorService connectExecutor =
+ PDExecutors.newDiscardPool("reconnect", 8, 8, Integer.MAX_VALUE);
+ // Write lock: the one thread doing the reconnect; read lock: others wait on it.
+ private final ReentrantReadWriteLock reconnectLock = new ReentrantReadWriteLock(true);
+ private final ReentrantLock resetLock = new ReentrantLock(true);
+ // Address of the current PD leader; unset/empty while unknown.
+ private final AtomicReference leader = new AtomicReference<>();
+ private final Authentication auth;
+ ReentrantReadWriteLock.ReadLock readLock = this.reconnectLock.readLock();
+ ReentrantReadWriteLock.WriteLock writeLock = this.reconnectLock.writeLock();
+ private volatile ManagedChannel leaderChannel = null;
+ private Random random = new Random();
+ // Candidate host list; replaced wholesale when the peer set changes.
+ private InvokeProxy proxy;
+ @Getter
+ private ClientCache cache;
+ @Getter
+ private PulseClient pulseClient;
+ @Getter
+ private Watcher watcher;
+ @Getter
+ private ConnectionClient connectionClient;
+
+ // Package-private: instances are created via ConnectionManagers.add().
+ ConnectionManager(PDConfig config) {
+ this.config = config;
+ String[] addresses = this.config.getServerHost().split(",");
+ this.proxy = new InvokeProxy(addresses);
+ this.auth = new Authentication(config.getUserName(), config.getAuthority());
+ }
+
+ /**
+  * Wires up the pulse client, watcher and cache, subscribes to leader/peer
+  * change events and (optionally) refreshes the host list from PD.
+  */
+ public void init(PulseClient pulseClient, ConnectionClient connectionClient) {
+ this.pulseClient = pulseClient;
+ this.watcher = new WatcherImpl(pulseClient);
+ this.watcher.watchNode(this::onLeaderChanged);
+ this.cache = new ClientCache(connectionClient, this.watcher);
+ this.connectionClient = connectionClient;
+ this.watcher.watchPdPeers(this::onPdPeersChanged);
+ this.setProxyByPd();
+ }
+
+ /**
+  * Best-effort: when auto-discovery is enabled, replaces the configured host
+  * list with the addresses reported by PD itself. Failures are only logged.
+  */
+ public void setProxyByPd() {
+ try {
+ if (config.isAutoGetPdServers()) {
+ Pdpb.GetAllGrpcAddressesResponse response = connectionClient.getPdAddressesCache();
+ if (response.getHeader().getError().getType() == ErrorType.OK) {
+ if (response.getAllowed()) {
+ this.proxy = new InvokeProxy(response.getAddressesList().toArray(new String[0]));
+ log.info("Get pd servers from cache: {}", response.getAddressesList());
+ }
+ } else {
+ log.warn("Failed to get pd servers from cache, {}", response);
+ }
+ }
+ } catch (Exception e) {
+ log.warn("Failed to get pd servers from cache, ", e);
+ }
+ }
+
+ // Pulse callback: apply a CHANGE_PEERS instruction pushed by PD.
+ private void onPdPeersChanged(PulseResponse pulseResponse) {
+ PdInstructionResponse ir = pulseResponse.getInstructionResponse();
+ if (ir != null && ir.getInstructionType() == PdInstructionType.CHANGE_PEERS) {
+ updatePeers(ir.getPeersList().toArray(new String[0]));
+ }
+ }
+
+ /**
+  * Polls every known host (starting at a random index, wrapping around) for
+  * the leader's grpc address, retrying up to WAITING_LEADER_SECONDS rounds
+  * with a one-second pause between rounds.
+  *
+  * @return the leader address, or "" when none could be obtained
+  */
+ public String getLeaderFromPD() {
+ for (int i = 0; i < WAITING_LEADER_SECONDS; i++) {
+ String next = "";
+ List hosts = this.proxy.getHosts();
+ int hostCount = hosts.size();
+ int startIndex = this.random.nextInt(hostCount);
+ int endIndex = startIndex + hostCount;
+ ManagedChannel channel = null;
+ long start = System.currentTimeMillis();
+ PDGrpc.PDBlockingStub stub = null;
+ for (int j = startIndex; j < endIndex; j++) {
+ try {
+ // Wrap around so each round visits every host exactly once.
+ if (j >= hostCount) {
+ next = hosts.get(j - hostCount);
+ } else {
+ next = hosts.get(j);
+ }
+ start = System.currentTimeMillis();
+ channel = Channels.getChannel(next);
+ stub = PDGrpc.newBlockingStub(channel)
+ .withDeadlineAfter(TIMEOUT_SECONDS, TimeUnit.SECONDS)
+ .withInterceptors(this.auth);
+ Pdpb.GetLeaderGrpcAddressResponse response = stub.getLeaderGrpcAddress(noArg);
+ pulseClient.handleErrors(response.getHeader());
+ String leader = response.getAddress();
+ if (!StringUtils.isEmpty(leader)) {
+ log.info("Get leader address: {} from {}", leader, next);
+ return leader;
+ }
+ } catch (StatusRuntimeException se) {
+ // Only log after several failed rounds to keep startup logs quiet.
+ if (i > 5) {
+ log.warn("Channel {} may be unavailable, state:{}, last:{} ms, option:{}, " +
+ "exception: ",
+ stub.getChannel(),
+ channel.getState(false), System.currentTimeMillis() - start,
+ stub.getCallOptions(), se.getStatus());
+ }
+ } catch (Exception e) {
+ log.warn(String.format("Failed to get leader by address: %s, ", next), e);
+ }
+ }
+ // NOTE(review): InterruptedException is swallowed without restoring the
+ // interrupt flag — consider Thread.currentThread().interrupt().
+ try {
+ Thread.sleep(1000L);
+ } catch (Exception exception) {
+ }
+ }
+ return "";
+ }
+
+ /**
+  * @return the last known leader address, or "" if unavailable
+  */
+ public String getLeader() {
+ try {
+ return this.leader.get();
+ } catch (Exception e) {
+ // NOTE(review): AtomicReference.get() does not throw; this catch looks
+ // unnecessary — confirm before removing.
+ log.error("Failed to get leader address, caused by:", e);
+ return "";
+ }
+ }
+
+ // Default grpc deadline in milliseconds (see Invoker.getBlockingCallOptions).
+ public long getDefaultDeadline() {
+ return this.config.getGrpcTimeOut();
+ }
+
+ // Register a client to be notified on leader changes.
+ public void addClient(BaseClient client) {
+ this.clients.add(client);
+ }
+
+ public void removeClient(BaseClient client) {
+ this.clients.remove(client);
+ }
+
+ // Reconnect, resolving the leader from the cluster, unconditionally.
+ public void reconnect() {
+ reconnect("", false);
+ }
+
+ public void reconnect(boolean recheck) {
+ reconnect("", recheck);
+ }
+
+ /**
+  * Switches this manager to the given leader (resolving it from the cluster
+  * when empty). Only one thread performs the switch; concurrent callers wait
+  * on the read lock until it completes (up to WAITING_CREATE_SECONDS).
+  *
+  * @param recheck when true, skip the switch if the current leader channel is
+  *                still usable
+  */
+ public void reconnect(String leaderAddress, boolean recheck) {
+ long start = System.currentTimeMillis();
+ boolean locked = this.writeLock.tryLock();
+ if (locked) {
+ try {
+ if (recheck && !Channels.isShutdown(this.leaderChannel)) {
+ return;
+ }
+ if (StringUtils.isEmpty(leaderAddress)) {
+ leaderAddress = getLeaderFromPD();
+ if (StringUtils.isEmpty(leaderAddress)) {
+ throw new PDRuntimeException(ErrorType.PD_RAFT_NOT_READY_VALUE, emptyMsg);
+ } else {
+ log.info("Get leader address: {}", leaderAddress);
+ }
+ }
+ update(leaderAddress, start, false);
+ } catch (Exception e) {
+ throw e;
+ } finally {
+ this.writeLock.unlock();
+ }
+ } else {
+ // Another thread is already reconnecting; wait for it to finish by
+ // acquiring (and immediately releasing) the read lock.
+ boolean readLocked = false;
+ try {
+ readLocked = this.readLock.tryLock(WAITING_CREATE_SECONDS, TimeUnit.SECONDS);
+ } catch (Exception e) {
+
+ // NOTE(review): interruption/timeout is silently ignored — the caller
+ // cannot tell whether the reconnect actually completed.
+ } finally {
+ if (readLocked) {
+ this.readLock.unlock();
+ }
+ }
+ }
+ }
+
+ /**
+  * Records the new leader, rebuilds the leader channel and asynchronously
+  * asks every registered client to reconnect; optionally blocks until done.
+  * No-op when the leader is unchanged and the channel is still valid.
+  */
+ private void update(String leaderAddress, long start, boolean blocking) {
+ String currentAddress = this.leader.get();
+ if (!leaderAddress.equals(currentAddress) || !Channels.isValidChannel(leaderChannel)) {
+ this.leader.set(leaderAddress);
+ this.leaderChannel = Channels.getChannel(leaderAddress);
+ String finalLeaderAddress = leaderAddress;
+ Future> future = this.connectExecutor.submit(() -> resetClients(finalLeaderAddress));
+ if (blocking) {
+ try {
+ future.get();
+ } catch (Exception e) {
+ log.warn("Failed to reset clients, caused by:", e);
+ }
+ }
+ long end = System.currentTimeMillis();
+ log.info("Reset leader from {} to {} in {} ms", currentAddress, leaderAddress, end - start);
+ }
+ }
+
+ /**
+  * Replaces the host list with the new peer set and reconnects. Skipped
+  * silently when another peer update is already in progress.
+  */
+ public void updatePeers(String[] endpoints) {
+ boolean locked = resetLock.tryLock();
+ if (locked) {
+ try {
+ log.warn("Update PD peers to {}", Arrays.toString(endpoints));
+ this.proxy = new InvokeProxy(endpoints);
+ reconnect();
+ log.warn("PD peers updated.");
+ } finally {
+ resetLock.unlock();
+ }
+ }
+ }
+
+ // Asynchronously close all channels, logging the outcome.
+ public void close() {
+ PDExecutors.asyncCallback(() -> Boolean.valueOf(close(10L)), b -> {
+ if (b.booleanValue()) {
+ log.info("Closed all channels held by this PDConnectionManager.");
+ } else {
+ log.warn("Failed to close all channels held by this PDConnectionManager.");
+ }
+ });
+ }
+
+ /**
+  * Closes all channels, waiting up to {@code timeout} for completion.
+  *
+  * @return true when every channel closed in time
+  */
+ public boolean close(long timeout) {
+ return PDExecutors.awaitTask(this::closeAllChannels, "Close all channels",
+ timeout).booleanValue();
+ }
+
+ private boolean closeAllChannels() {
+ return this.proxy.getHosts().parallelStream().map(Channels::getChannel)
+ .allMatch(Channels::closeChannel);
+ }
+
+ // Parallel stream of the currently valid channels to all known hosts.
+ public Stream getParallelChannelStream() {
+ return this.proxy.getHosts().parallelStream().map(Channels::getChannel)
+ .filter(Channels::isValidChannel);
+ }
+
+ // Notify every registered client of the new leader; per-client failures are
+ // logged and do not stop the loop.
+ private boolean resetClients(String leaderAddress) {
+ for (BaseClient client : this.clients) {
+ try {
+ client.onLeaderChanged(leaderAddress);
+ } catch (Exception e) {
+ log.warn(String.format("Failed to let client %s reconnect, caused by:", client.getClass()),
+ e);
+ }
+ }
+ return true;
+ }
+
+ /**
+  * @return the first valid channel among the known hosts, or null if none
+  */
+ public Channel getValidChannel() {
+ return this.proxy.getHosts().stream().map(Channels::getChannel).filter(Channels::isValidChannel)
+ .findFirst().orElse(null);
+ }
+
+ // Build an async stub over any valid channel, with standard client params.
+ public > T createAsyncStub(Function stubCreator) {
+ HgAssert.isArgumentNotNull(stubCreator, "The stub creator can't be null");
+ return withAsyncParams(stubCreator.apply(getValidChannel()));
+ }
+
+ // Build a blocking stub over any valid channel, with standard client params.
+ public > T createBlockingStub(Function stubCreator) {
+ HgAssert.isArgumentNotNull(stubCreator, "The stub creator can't be null");
+ return createBlockingStub(stubCreator, getValidChannel());
+ }
+
+ private > T createBlockingStub(Function creator,
+ Channel channel) {
+ return withBlockingParams(creator.apply(channel));
+ }
+
+ // Apply the shared inbound-size limit and auth interceptor to an async stub.
+ public > T withAsyncParams(T stub) {
+ HgAssert.isArgumentNotNull(stub, "The stub can't be null");
+ return stub.withMaxInboundMessageSize(PDConfig.getInboundMessageSize()).withInterceptors(auth);
+ }
+
+ public > T withBlockingParams(T stub) {
+ HgAssert.isArgumentNotNull(stub, "The stub can't be null");
+ return stub.withMaxInboundMessageSize(PDConfig.getInboundMessageSize()).withInterceptors(auth);
+ }
+
+ /**
+  * @return the channel to the current leader, reconnecting first if it is
+  * missing or shut down
+  */
+ // NOTE(review): check-then-act on leaderChannel is not atomic; a stale
+ // channel could still be returned under contention — verify acceptable.
+ public Channel getLeaderChannel() {
+ if (this.leaderChannel == null || Channels.isShutdown(this.leaderChannel)) {
+ reconnect(true);
+ }
+ return this.leaderChannel;
+ }
+
+ // Watcher callback: reconnect when PD announces a leader change.
+ private void onLeaderChanged(NodeEvent response) {
+ if (response.getEventType() == NodeEvent.EventType.NODE_PD_LEADER_CHANGE) {
+ reconnect();
+ }
+ }
+}
\ No newline at end of file
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java
new file mode 100644
index 0000000000..0f1c202b6b
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java
@@ -0,0 +1,81 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.commons.lang3.StringUtils;
+
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.client.PulseClient;
+
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * Process-wide registry mapping each PD host address to the shared
+ * {@link ConnectionManager} of its cluster, so that every client configured
+ * with any host of the same cluster reuses one manager (and therefore one set
+ * of channels, watchers and pulse streams).
+ *
+ * @author zhangyingjie
+ * @date 2024/1/31
+ **/
+@Slf4j
+public class ConnectionManagers {
+
+    private static final ConnectionManagers INSTANCE = new ConnectionManagers();
+    // host address -> shared ConnectionManager of the cluster that host belongs to
+    private static final ConcurrentMap<String, ConnectionManager> cms = new ConcurrentHashMap<>();
+
+    public static ConnectionManagers getInstance() {
+        return INSTANCE;
+    }
+
+    /**
+     * Returns the manager for the cluster described by {@code config}, creating
+     * and initializing one when none of the configured hosts is registered yet.
+     * Every host in the config is registered to point at the returned manager.
+     *
+     * @return the shared manager, or null if the config contains no usable host
+     */
+    public synchronized ConnectionManager add(PDConfig config) {
+        String pds = config.getServerHost();
+        String[] hosts = pds.split(",");
+        ConnectionManager manager = null;
+        if (hosts.length > 0 && !StringUtils.isEmpty(hosts[0])) {
+            // Check every configured host, not just the first one, so that two
+            // configs listing the same cluster in a different order still share
+            // a single manager instead of creating a duplicate.
+            for (String host : hosts) {
+                manager = cms.get(host);
+                if (manager != null) {
+                    break;
+                }
+            }
+            if (manager == null) {
+                manager = new ConnectionManager(config);
+                cms.put(hosts[0], manager);
+                PulseClient pulseClient = new PulseClient(config);
+                ConnectionClient connectionClient = new ConnectionClient(config);
+                manager.init(pulseClient, connectionClient);
+            }
+            // Register the manager under every configured host for later lookups.
+            for (String host : hosts) {
+                cms.putIfAbsent(host, manager);
+            }
+        }
+        return manager;
+    }
+
+    /** Looks up the manager registered for a single host, or null. */
+    public ConnectionManager get(String host) {
+        return cms.get(host);
+    }
+
+    /** Returns the manager of the first configured host that is registered, or null. */
+    public ConnectionManager get(PDConfig config) {
+        String pds = config.getServerHost();
+        String[] hosts = pds.split(",");
+        for (String host : hosts) {
+            ConnectionManager manager = cms.get(host);
+            if (manager != null) {
+                return manager;
+            }
+        }
+        return null;
+    }
+
+    /** Forces the manager registered for {@code leader} to reconnect to it. */
+    public void reset(String leader) {
+        ConnectionManager manager = cms.get(leader);
+        if (manager == null) {
+            return;
+        }
+        manager.reconnect(leader, false);
+    }
+
+    /** Propagates a peer-list change to the manager owning any of the given peers. */
+    public void resetPeers(String[] peers) {
+        for (String peer : peers) {
+            ConnectionManager manager = cms.get(peer);
+            if (manager == null) {
+                continue;
+            }
+            manager.updatePeers(peers);
+            break;
+        }
+    }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java
new file mode 100644
index 0000000000..5192134e52
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java
@@ -0,0 +1,39 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+
+import lombok.Getter;
+import lombok.Setter;
+
+/**
+ * Holds the candidate PD host addresses (plus the last known leader and its
+ * blocking stub) used by the connection layer when probing for a leader.
+ *
+ * @author zhangyingjie
+ * @date 2024/1/31
+ **/
+public class InvokeProxy {
+
+    @Getter
+    @Setter
+    private volatile PDGrpc.PDBlockingStub stub;
+    @Getter
+    @Setter
+    private String leader;
+    // Candidate PD host addresses; populated once in the constructor.
+    @Getter
+    private List<String> hosts;
+
+    public InvokeProxy(String[] switcher) {
+        updateHosts(switcher);
+    }
+
+    /**
+     * Keeps the non-blank entries of {@code switcher} as the host list.
+     * Entries are trimmed so "host1, host2" style lists are handled correctly.
+     */
+    private void updateHosts(String[] switcher) {
+        List<String> valid = new ArrayList<>(switcher.length);
+        for (String host : switcher) {
+            String trimmed = host.trim();
+            if (!trimmed.isEmpty()) {
+                valid.add(trimmed);
+            }
+        }
+        this.hosts = valid;
+    }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java
new file mode 100644
index 0000000000..0adef093b2
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java
@@ -0,0 +1,84 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import org.apache.hugegraph.pd.client.PDConfig;
+import org.apache.hugegraph.pd.common.HgAssert;
+import org.apache.hugegraph.pd.common.PDException;
+
+import io.grpc.CallOptions;
+import io.grpc.Channel;
+import io.grpc.MethodDescriptor;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.StreamObserver;
+
+/**
+ * Base of the grpc call strategies used by the PD client: concrete subclasses
+ * decide which channel/stub a unary, server-streaming or bidirectional call
+ * goes through (e.g. always the leader, see LeaderInvoker).
+ *
+ * NOTE(review): generic type parameters appear to have been stripped from this
+ * patch (raw MethodDescriptor/Function, undeclared ReqT/RespT) — verify
+ * against the upstream file.
+ */
+public abstract class Invoker {
+ protected ConnectionManager cm;
+ // Factories turning a Channel into the service's async / blocking stub.
+ protected Function asCreator;
+ protected Function bsCreator;
+ // Lazily created stubs, used in this class only as CallOptions templates.
+ protected AbstractStub> asyncStub;
+ protected AbstractBlockingStub> blockingStub;
+ protected PDConfig config;
+
+ public Invoker(ConnectionManager cm, Function asCreator,
+ Function bsCreator) {
+ this.cm = cm;
+ this.config = this.cm.getConfig();
+ this.asCreator = asCreator;
+ this.bsCreator = bsCreator;
+ }
+
+ /** Unary call using the manager's default deadline. */
+ public abstract RespT blockingCall(MethodDescriptor paramMethodDescriptor,
+ ReqT paramReqT) throws PDException;
+
+ /** Unary call with an explicit deadline (milliseconds, per getBlockingCallOptions). */
+ public abstract RespT blockingCall(MethodDescriptor paramMethodDescriptor,
+ ReqT paramReqT, long paramLong) throws PDException;
+
+ /**
+  * Unary call whose response is returned only when it satisfies the given
+  * predicate; otherwise null is returned.
+  */
+ public RespT blockingCall(MethodDescriptor method, ReqT req,
+ Predicate predicate) throws PDException {
+ HgAssert.isArgumentNotNull(predicate, "The predicate can't be null");
+ RespT respT = blockingCall(method, req);
+ if (predicate.test(respT)) {
+ return respT;
+ }
+ return null;
+ }
+
+ /** One-request, streamed-responses call. */
+ public abstract void serverStreamingCall(
+ MethodDescriptor methodDescriptor, ReqT paramReqT,
+ StreamObserver paramStreamObserver) throws PDException;
+
+ /** Bidirectional streaming call. */
+ public abstract StreamObserver streamingCall(
+ MethodDescriptor paramMethodDescriptor,
+ StreamObserver paramStreamObserver) throws PDException;
+
+ // Call options for blocking calls, with the manager's default deadline applied.
+ protected CallOptions getBlockingCallOptions() {
+ return getBlockingCallOptions(this.cm.getDefaultDeadline());
+ }
+
+ // Call options with an explicit deadline; the template stub is built lazily.
+ protected CallOptions getBlockingCallOptions(long duration) {
+ if (this.blockingStub == null) {
+ this.blockingStub = this.cm.createBlockingStub(this.bsCreator);
+ }
+ return this.blockingStub.getCallOptions()
+ .withDeadlineAfter(duration, TimeUnit.MILLISECONDS);
+ }
+
+ // Call options for streaming calls; the template stub is built lazily.
+ protected CallOptions getStreamingCallOptions() {
+ if (this.asyncStub == null) {
+ this.asyncStub = this.cm.createAsyncStub(this.asCreator);
+ }
+ return this.asyncStub.getCallOptions();
+ }
+
+ // Channel to the current PD leader (the manager reconnects if needed).
+ protected Channel getChannel() {
+ return this.cm.getLeaderChannel();
+ }
+
+ public void reconnect() {
+ this.cm.reconnect();
+ }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java
new file mode 100644
index 0000000000..6d4ed6d8b4
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java
@@ -0,0 +1,110 @@
+package org.apache.hugegraph.pd.client.rpc;
+
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.client.interceptor.Authentication;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PDRuntimeException;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+
+import io.grpc.Channel;
+import io.grpc.ClientCall;
+import io.grpc.MethodDescriptor;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.ClientCalls;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class LeaderInvoker extends Invoker {
+
+ private static final int MAX_RETRY = 10;
+ private Authentication auth = new Authentication(this.config.getUserName(), this.config.getAuthority());
+
+ public LeaderInvoker(ConnectionManager cm, Function asCreator,
+ Function bsCreator) {
+ super(cm, asCreator, bsCreator);
+ }
+
+ public RespT blockingCall(MethodDescriptor