From c9ae8961acd022e5ff2f5ea7932305d248f079fd Mon Sep 17 00:00:00 2001 From: Peng Junzhi <201250214@smail.nju.edu.cn> Date: Sun, 18 May 2025 01:00:12 +0800 Subject: [PATCH] failure: failed to merge 4.0 due to repository structure not being consistent --- conf/hugegraph.license | Bin 0 -> 848 bytes hg-pd-cli/pom.xml | 120 + .../hugegraph/pd/cli/CliApplication.java | 40 + .../hugegraph/pd/cli/cmd/ChangeRaft.java | 19 + .../apache/hugegraph/pd/cli/cmd/Command.java | 54 + .../apache/hugegraph/pd/cli/cmd/Config.java | 49 + .../hugegraph/pd/cli/cmd/Parameter.java | 15 + hg-pd-cli/src/main/resources/log4j2.xml | 122 + .../org/apache/hugegraph/pd/cli/CliTest.java | 61 + .../hugegraph/pd/client/BaseClient.java | 96 + .../hugegraph/pd/client/ClientCache.java | 402 +++ .../hugegraph/pd/client/Discoverable.java | 18 + .../hugegraph/pd/client/DiscoveryClient.java | 121 + .../pd/client/DiscoveryClientImpl.java | 179 ++ .../apache/hugegraph/pd/client/KvClient.java | 314 ++ .../hugegraph/pd/client/LicenseClient.java | 82 + .../hugegraph/pd/client/MetaClient.java | 139 + .../apache/hugegraph/pd/client/PDClient.java | 685 ++++ .../apache/hugegraph/pd/client/PDConfig.java | 111 + .../hugegraph/pd/client/PulseClient.java | 134 + .../hugegraph/pd/client/impl/PDApi.java | 2134 +++++++++++++ .../pd/client/impl/StreamDelegator.java | 213 ++ .../client/impl/StreamDelegatorReceiver.java | 31 + .../pd/client/impl/StreamDelegatorSender.java | 99 + .../pd/client/impl/StreamDelegatorState.java | 9 + .../pd/client/interceptor/Authentication.java | 72 + .../interceptor/AuthenticationException.java | 16 + .../client/listener/LeaderChangeListener.java | 16 + .../pd/client/listener/PDEventListener.java | 20 + .../hugegraph/pd/client/rpc/AnyInvoker.java | 88 + .../hugegraph/pd/client/rpc/Channels.java | 193 ++ .../pd/client/rpc/ConnectionClient.java | 44 + .../pd/client/rpc/ConnectionManager.java | 351 ++ .../pd/client/rpc/ConnectionManagers.java | 81 + .../hugegraph/pd/client/rpc/InvokeProxy.java | 39 + 
.../hugegraph/pd/client/rpc/Invoker.java | 84 + .../pd/client/rpc/LeaderInvoker.java | 110 + .../pd/client/support/PDExecutors.java | 249 ++ .../pd/pulse/DefaultPulseListener.java | 59 + .../pd/pulse/DefaultPulseNotifier.java | 66 + .../hugegraph/pd/pulse/PartitionNotice.java | 24 + .../org/apache/hugegraph/pd/pulse/Pulse.java | 67 + .../hugegraph/pd/pulse/PulseListener.java | 33 + .../hugegraph/pd/pulse/PulseNotifier.java | 11 + .../pd/pulse/PulseResponseNotice.java | 44 + .../hugegraph/pd/pulse/PulseServerNotice.java | 20 + .../apache/hugegraph/pd/watch/NodeEvent.java | 105 + .../hugegraph/pd/watch/PDEventRaiser.java | 56 + .../pd/watch/PDWatchPulseConverter.java | 83 + .../hugegraph/pd/watch/PartitionEvent.java | 96 + .../hugegraph/pd/watch/WatchListener.java | 9 + .../apache/hugegraph/pd/watch/WatchType.java | 16 + .../apache/hugegraph/pd/watch/Watcher.java | 85 + .../hugegraph/pd/watch/WatcherImpl.java | 91 + .../hugegraph/pd/PartitionCacheTest.java | 82 + .../hugegraph/pd/StoreRegisterTest.java | 165 + .../pd/client/DiscoveryClientImplTest.java | 136 + .../pd/client/LicenseClientImplTest.java | 114 + .../pd/client/test/HgPDTestUtil.java | 78 + hg-pd-client/src/test/resources/log4j2.xml | 85 + hg-pd-common/pom.xml | 69 + .../org/apache/hugegraph/pd/common/Cache.java | 95 + .../apache/hugegraph/pd/common/Consts.java | 23 + .../pd/common/DefaultThreadFactory.java | 26 + .../hugegraph/pd/common/GraphCache.java | 152 + .../apache/hugegraph/pd/common/HgAssert.java | 130 + .../apache/hugegraph/pd/common/KVPair.java | 135 + .../hugegraph/pd/common/PDException.java | 51 + .../pd/common/PDRuntimeException.java | 36 + .../hugegraph/pd/common/PartitionCache.java | 547 ++++ .../hugegraph/pd/common/PartitionUtils.java | 28 + .../pd/util/DefaultThreadFactory.java | 32 + .../hugegraph/pd/util/ExecutorUtil.java | 51 + .../hugegraph/pd/util/ResponseUtil.java | 55 + .../apache/hugegraph/pd/ConfigService.java | 181 ++ .../org/apache/hugegraph/pd/IdService.java | 61 + 
.../org/apache/hugegraph/pd/KvService.java | 364 +++ .../org/apache/hugegraph/pd/LogService.java | 81 + .../apache/hugegraph/pd/PartitionService.java | 2116 ++++++++++++ .../apache/hugegraph/pd/RegistryService.java | 30 + .../hugegraph/pd/StoreMonitorDataService.java | 276 ++ .../apache/hugegraph/pd/StoreNodeService.java | 1668 ++++++++++ .../hugegraph/pd/TaskScheduleService.java | 1193 +++++++ .../apache/hugegraph/pd/config/PDConfig.java | 460 +++ .../apache/hugegraph/pd/config/Server.java | 21 + .../apache/hugegraph/pd/consts/PoolNames.java | 16 + .../PartitionInstructionListener.java | 31 + .../pd/listener/PartitionStatusListener.java | 11 + .../pd/listener/ShardGroupStatusListener.java | 9 + .../pd/listener/StoreStatusListener.java | 13 + .../hugegraph/pd/meta/ConfigMetaStore.java | 100 + .../hugegraph/pd/meta/DiscoveryMetaStore.java | 119 + .../apache/hugegraph/pd/meta/IdMetaStore.java | 269 ++ .../org/apache/hugegraph/pd/meta/LogMeta.java | 35 + .../hugegraph/pd/meta/MetadataFactory.java | 63 + .../hugegraph/pd/meta/MetadataKeyHelper.java | 522 +++ .../pd/meta/MetadataRocksDBStore.java | 215 ++ .../hugegraph/pd/meta/MetadataStoreBase.java | 139 + .../hugegraph/pd/meta/PartitionMeta.java | 416 +++ .../apache/hugegraph/pd/meta/PulseStore.java | 68 + .../hugegraph/pd/meta/StoreInfoMeta.java | 272 ++ .../hugegraph/pd/meta/TaskInfoMeta.java | 159 + .../pd/raft/FutureClosureAdapter.java | 29 + .../apache/hugegraph/pd/raft/KVOperation.java | 176 + .../hugegraph/pd/raft/KVStoreClosure.java | 16 + .../apache/hugegraph/pd/raft/PeerUtil.java | 47 + .../apache/hugegraph/pd/raft/RaftEngine.java | 394 +++ .../hugegraph/pd/raft/RaftRpcClient.java | 92 + .../hugegraph/pd/raft/RaftRpcProcessor.java | 134 + .../hugegraph/pd/raft/RaftStateListener.java | 5 + .../hugegraph/pd/raft/RaftStateMachine.java | 409 +++ .../hugegraph/pd/raft/RaftTaskHandler.java | 10 + .../apache/hugegraph/pd/raft/ZipUtils.java | 64 + .../hugegraph/pd/service/MetadataService.java | 209 ++ 
.../pd/store/BaseKVStoreClosure.java | 32 + .../apache/hugegraph/pd/store/HgKVStore.java | 40 + .../hugegraph/pd/store/HgKVStoreImpl.java | 375 +++ .../org/apache/hugegraph/pd/store/KV.java | 27 + .../hugegraph/pd/store/RaftKVStore.java | 395 +++ .../hugegraph/pd/MonitorServiceTest.java | 90 + .../hugegraph/pd/PartitionServiceTest.java | 29 + .../hugegraph/pd/StoreNodeServiceTest.java | 440 +++ .../org/apache/hugegraph/pd/UnitTestBase.java | 14 + .../pd/common/PartitionUtilsTest.java | 27 + .../hugegraph/pd/store/HgKVStoreImplTest.java | 124 + .../assembly/static/bin/start-hugegraph-pd.sh | 107 + .../src/assembly/static/conf/application.yml | 62 + .../static/conf/application.yml.template | 58 + .../assembly/static/conf/hugegraph.license | Bin 0 -> 848 bytes hg-pd-grpc/src/main/proto/discovery.proto | 54 + hg-pd-grpc/src/main/proto/metaTask.proto | 51 + .../hugegraph/pd/boot/HugePDServer.java | 25 + .../hugegraph/pd/boot/ShutdownHook.java | 74 + .../pd/license/CommonLicenseManager.java | 106 + .../hugegraph/pd/license/ExtraParam.java | 114 + .../pd/license/LicenseVerifierService.java | 379 +++ .../pd/license/LicenseVerifyManager.java | 56 + .../pd/license/LicenseVerifyParam.java | 43 + .../hugegraph/pd/metrics/MetricsConfig.java | 29 + .../hugegraph/pd/metrics/PDMetrics.java | 200 ++ .../apache/hugegraph/pd/model/DemoModel.java | 55 + .../hugegraph/pd/model/GraphRestRequest.java | 9 + .../pd/model/GraphSpaceRestRequest.java | 8 + .../hugegraph/pd/model/GraphStatistics.java | 78 + .../apache/hugegraph/pd/model/Partition.java | 75 + .../hugegraph/pd/model/PeerRestRequest.java | 8 + .../pd/model/RegistryQueryRestRequest.java | 17 + .../pd/model/RegistryRestRequest.java | 20 + .../pd/model/RegistryRestResponse.java | 20 + .../hugegraph/pd/model/RestApiResponse.java | 41 + .../apache/hugegraph/pd/model/SDConfig.java | 143 + .../org/apache/hugegraph/pd/model/Shard.java | 34 + .../hugegraph/pd/model/StoreRestRequest.java | 8 + .../hugegraph/pd/model/TimeRangeRequest.java | 
13 + .../pd/notice/NoticeBroadcaster.java | 282 ++ .../hugegraph/pd/notice/NoticeDeliver.java | 16 + .../pd/pulse/AbstractObserverSubject.java | 298 ++ .../pd/pulse/BroadcasterFactory.java | 113 + .../apache/hugegraph/pd/pulse/ChangeType.java | 27 + .../pd/pulse/GraphChangeSubject.java | 43 + .../hugegraph/pd/pulse/NoticeParseUtil.java | 75 + .../hugegraph/pd/pulse/PDPulseSubjects.java | 236 ++ .../pd/pulse/PartitionChangeSubject.java | 42 + .../pd/pulse/PartitionHeartbeatSubject.java | 49 + .../pd/pulse/PdInstructionSubject.java | 45 + .../pd/pulse/PulseDurableProvider.java | 86 + .../pd/pulse/PulseDurableProviderImpl.java | 168 + .../hugegraph/pd/pulse/PulseListener.java | 25 + .../hugegraph/pd/pulse/PulseNotices.java | 65 + .../hugegraph/pd/pulse/RetryingHub.java | 184 ++ .../hugegraph/pd/pulse/RetryingSwitch.java | 268 ++ .../pd/pulse/ShardGroupChangeSubject.java | 45 + .../pd/pulse/StoreNodeChangeSubject.java | 45 + .../pd/pulse/SubjectIndividualObserver.java | 155 + .../PartitionInstructionListenerImpl.java | 78 + .../impl/PartitionStatusListenerImpl.java | 22 + .../pd/pulse/impl/PulseListenerImpl.java | 36 + .../impl/ShardGroupStatusListenerImpl.java | 33 + .../pulse/impl/StoreStatusListenerImpl.java | 41 + .../org/apache/hugegraph/pd/rest/API.java | 206 ++ .../apache/hugegraph/pd/rest/GraphAPI.java | 141 + .../hugegraph/pd/rest/GraphSpaceAPI.java | 72 + .../apache/hugegraph/pd/rest/IndexAPI.java | 333 ++ .../apache/hugegraph/pd/rest/MemberAPI.java | 305 ++ .../hugegraph/pd/rest/PartitionAPI.java | 516 +++ .../apache/hugegraph/pd/rest/RegistryAPI.java | 235 ++ .../apache/hugegraph/pd/rest/SDConfigAPI.java | 84 + .../apache/hugegraph/pd/rest/ShardAPI.java | 134 + .../apache/hugegraph/pd/rest/StoreAPI.java | 420 +++ .../org/apache/hugegraph/pd/rest/TaskAPI.java | 111 + .../org/apache/hugegraph/pd/rest/TestAPI.java | 192 ++ .../interceptor/AuthenticationConfigurer.java | 24 + .../rest/interceptor/RestAuthentication.java | 64 + 
.../pd/service/DiscoveryService.java | 119 + .../pd/service/KvServiceGrpcImpl.java | 628 ++++ .../pd/service/MetaServiceGrpcImpl.java | 308 ++ .../hugegraph/pd/service/PDPulseService.java | 40 + .../hugegraph/pd/service/PDRestService.java | 366 +++ .../hugegraph/pd/service/PDService.java | 2823 +++++++++++++++++ .../hugegraph/pd/service/SDConfigService.java | 250 ++ .../hugegraph/pd/service/ServiceGrpc.java | 102 + .../hugegraph/pd/service/UpgradeService.java | 88 + .../service/interceptor/Authentication.java | 107 + .../interceptor/GrpcAuthentication.java | 62 + .../interceptor/RedirectInterceptor.java | 68 + .../pd/upgrade/VersionScriptFactory.java | 41 + .../pd/upgrade/VersionUpgradeScript.java | 39 + .../upgrade/scripts/PartitionMetaUpgrade.java | 134 + .../pd/upgrade/scripts/TaskCleanUpgrade.java | 47 + .../apache/hugegraph/pd/util/DateUtil.java | 59 + .../hugegraph/pd/util/HgExecutorUtil.java | 164 + .../apache/hugegraph/pd/util/HgMapCache.java | 82 + .../org/apache/hugegraph/pd/util/IdUtil.java | 32 + .../apache/hugegraph/pd/util/TokenUtil.java | 81 + .../pd/util/grpc/GRpcServerConfig.java | 29 + .../pd/util/grpc/StreamObserverUtil.java | 32 + .../hugegraph/pd/watch/KvWatchSubject.java | 329 ++ .../src/test/java/live/PDServer0.java | 24 + .../src/test/java/live/PDServer1.java | 22 + .../src/test/java/live/PDServer13.java | 20 + .../src/test/java/live/PDServer2.java | 22 + .../src/test/java/live/PDServer3.java | 23 + .../test/resources/application-server1.yml | 67 + .../test/resources/application-server13.yml | 54 + .../org/apache/hugegraph/pd/BaseTest.java | 21 + .../hugegraph/pd/cli/BaseCliToolsTest.java | 19 + .../hugegraph/pd/cli/CliToolsSuiteTest.java | 17 + .../org/apache/hugegraph/pd/cli/MainTest.java | 40 + .../hugegraph/pd/client/BaseClientTest.java | 28 + .../hugegraph/pd/client/ChangingLeader.java | 37 + .../pd/client/DiscoveryClientTest.java | 65 + .../hugegraph/pd/client/KvClientTest.java | 74 + .../pd/client/PDClientSuiteTest.java | 18 + 
.../hugegraph/pd/client/PDClientTest.java | 436 +++ .../hugegraph/pd/client/PDPulseTest.java | 156 + .../hugegraph/pd/client/PDWatchTest.java | 79 + .../hugegraph/pd/common/BaseCommonTest.java | 17 + .../hugegraph/pd/common/CommonSuiteTest.java | 25 + .../hugegraph/pd/common/HgAssertTest.java | 136 + .../hugegraph/pd/common/KVPairTest.java | 56 + .../pd/common/MetadataKeyHelperTest.java | 233 ++ .../pd/common/PartitionCacheTest.java | 374 +++ .../pd/common/PartitionUtilsTest.java | 16 + .../hugegraph/pd/core/BaseCoreTest.java | 61 + .../hugegraph/pd/core/PDCoreSuiteTest.java | 19 + .../pd/core/StoreNodeServiceTest.java | 104 + .../pd/core/meta/MetadataKeyHelperTest.java | 19 + .../hugegraph/pd/grpc/BaseGrpcTest.java | 20 + .../hugegraph/pd/grpc/GrpcSuiteTest.java | 15 + .../hugegraph/pd/service/BaseServerTest.java | 45 + .../pd/service/ConfigServiceTest.java | 144 + .../hugegraph/pd/service/IdServiceTest.java | 91 + .../hugegraph/pd/service/KvServiceTest.java | 43 + .../hugegraph/pd/service/LogServiceTest.java | 36 + .../pd/service/PartitionServiceTest.java | 120 + .../hugegraph/pd/service/PdTestBase.java | 268 ++ .../hugegraph/pd/service/RestApiTest.java | 167 + .../hugegraph/pd/service/ServerSuiteTest.java | 24 + .../service/StoreMonitorDataServiceTest.java | 63 + .../pd/service/StoreNodeServiceNewTest.java | 48 + .../pd/service/StoreServiceTest.java | 933 ++++++ .../pd/service/TaskScheduleServiceTest.java | 105 + .../hugegraph/pd/test/HgPDTestUtil.java | 78 + hugegraph-pd/hg-pd-client/pom.xml | 24 +- .../pd/client/DiscoveryClientImpl.java | 36 +- .../hugegraph/pd/client/LicenseClient.java | 37 +- .../apache/hugegraph/pd/client/PDClient.java | 828 ++++- .../apache/hugegraph/pd/client/PDWatch.java | 140 - .../hugegraph/pd/client/PDWatchImpl.java | 204 -- .../apache/hugegraph/pd/common/HgAssert.java | 19 +- .../apache/hugegraph/pd/common/KVPair.java | 3 + .../hugegraph/pd/common/PartitionCache.java | 99 +- hugegraph-pd/hg-pd-core/pom.xml | 15 +- 
.../org/apache/hugegraph/pd/KvService.java | 49 + .../org/apache/hugegraph/pd/LogService.java | 14 + .../apache/hugegraph/pd/PartitionService.java | 622 +++- .../hugegraph/pd/StoreMonitorDataService.java | 11 + .../apache/hugegraph/pd/StoreNodeService.java | 654 +++- .../hugegraph/pd/TaskScheduleService.java | 407 ++- .../apache/hugegraph/pd/config/PDConfig.java | 186 +- .../hugegraph/pd/meta/ConfigMetaStore.java | 29 + .../hugegraph/pd/meta/DiscoveryMetaStore.java | 12 + .../apache/hugegraph/pd/meta/IdMetaStore.java | 11 + .../hugegraph/pd/meta/MetadataKeyHelper.java | 151 +- .../pd/meta/MetadataRocksDBStore.java | 42 +- .../hugegraph/pd/meta/MetadataStoreBase.java | 19 + .../hugegraph/pd/meta/PartitionMeta.java | 133 + .../hugegraph/pd/meta/StoreInfoMeta.java | 68 + .../hugegraph/pd/meta/TaskInfoMeta.java | 30 +- .../apache/hugegraph/pd/raft/KVOperation.java | 10 + .../hugegraph/pd/raft/RaftRpcClient.java | 4 + .../hugegraph/pd/raft/RaftRpcProcessor.java | 4 + .../hugegraph/pd/raft/RaftStateMachine.java | 167 +- .../hugegraph/pd/store/HgKVStoreImpl.java | 48 +- .../hugegraph/pd/store/RaftKVStore.java | 83 +- hugegraph-pd/hg-pd-dist/pom.xml | 4 +- .../src/assembly/static/conf/log4j2.xml | 44 +- hugegraph-pd/hg-pd-grpc/pom.xml | 33 + .../src/main/proto/cluster_op.proto | 134 + .../hg-pd-grpc/src/main/proto/common.proto | 104 + .../hg-pd-grpc/src/main/proto/kv.proto | 33 +- .../hg-pd-grpc/src/main/proto/meta.proto | 50 + .../hg-pd-grpc/src/main/proto/pdpb.proto | 138 +- .../hg-pd-grpc/src/main/proto/pulse.proto | 226 ++ .../src/main/proto/store_group.proto | 72 + .../hg-pd-grpc/src/main/proto/watch.proto | 86 + hugegraph-pd/hg-pd-service/pom.xml | 79 + .../hugegraph/pd/model/PromTargetsModel.java | 52 + .../pd/notice/NoticeBroadcaster.java | 107 + .../org/apache/hugegraph/pd/rest/API.java | 23 + .../apache/hugegraph/pd/rest/IndexAPI.java | 72 +- .../apache/hugegraph/pd/rest/MemberAPI.java | 77 +- .../hugegraph/pd/rest/PartitionAPI.java | 46 +- 
.../apache/hugegraph/pd/rest/RegistryAPI.java | 49 +- .../apache/hugegraph/pd/rest/ShardAPI.java | 18 +- .../apache/hugegraph/pd/rest/StoreAPI.java | 70 +- .../org/apache/hugegraph/pd/rest/TaskAPI.java | 9 + .../pd/service/KvServiceGrpcImpl.java | 38 + .../hugegraph/pd/service/PDRestService.java | 115 +- .../hugegraph/pd/service/PDService.java | 1182 ++++++- .../upgrade/scripts/PartitionMetaUpgrade.java | 12 + .../hugegraph/pd/watch/KvWatchSubject.java | 46 +- .../hugegraph/pd/watch/PDWatchSubject.java | 215 -- .../src/main/resources/log4j2.xml | 18 +- .../hugegraph/pd/client/PDClientTest.java | 55 +- .../pd/client/StoreRegisterTest.java | 38 + .../hugegraph/pd/common/HgAssertTest.java | 4 + .../hugegraph/pd/core/ConfigServiceTest.java | 43 +- .../hugegraph/pd/core/PDCoreTestBase.java | 57 +- .../hugegraph/pd/core/StoreServiceTest.java | 115 +- .../pd/core/meta/MetadataKeyHelperTest.java | 7 + .../pd/core/store/HgKVStoreImplTest.java | 11 + .../apache/hugegraph/pd/rest/RestApiTest.java | 46 + start_pd_server.sh | 44 + start_store_server.sh | 44 + 335 files changed, 46119 insertions(+), 1020 deletions(-) create mode 100644 conf/hugegraph.license create mode 100644 hg-pd-cli/pom.xml create mode 100644 hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java create mode 100644 hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java create mode 100644 hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java create mode 100644 hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java create mode 100644 hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java create mode 100644 hg-pd-cli/src/main/resources/log4j2.xml create mode 100644 hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java create mode 100644 
hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java 
create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/support/PDExecutors.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseListener.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseNotifier.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/Pulse.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotifier.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseResponseNotice.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDEventRaiser.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatchPulseConverter.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchListener.java create mode 100644 
hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/Watcher.java create mode 100644 hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatcherImpl.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java create mode 100644 hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java create mode 100644 hg-pd-client/src/test/resources/log4j2.xml create mode 100644 hg-pd-common/pom.xml create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Cache.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Consts.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/DefaultThreadFactory.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/DefaultThreadFactory.java create mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ExecutorUtil.java create 
mode 100644 hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ResponseUtil.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/Server.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/consts/PoolNames.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionInstructionListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionStatusListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/ShardGroupStatusListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/StoreStatusListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/LogMeta.java create mode 100644 
hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataFactory.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PulseStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/PeerUtil.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/service/MetadataService.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java create mode 100644 
hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java create mode 100644 hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java create mode 100644 hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java create mode 100644 hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh create mode 100644 hg-pd-dist/src/assembly/static/conf/application.yml create mode 100644 hg-pd-dist/src/assembly/static/conf/application.yml.template create mode 100644 hg-pd-dist/src/assembly/static/conf/hugegraph.license create mode 100644 hg-pd-grpc/src/main/proto/discovery.proto create mode 100644 hg-pd-grpc/src/main/proto/metaTask.proto create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/ShutdownHook.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/CommonLicenseManager.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/ExtraParam.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java create mode 100644 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyParam.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphStatistics.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Partition.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Shard.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeDeliver.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java create mode 100644 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/BroadcasterFactory.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ChangeType.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/GraphChangeSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/NoticeParseUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubjects.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionChangeSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProvider.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProviderImpl.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotices.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingHub.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingSwitch.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ShardGroupChangeSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/StoreNodeChangeSubject.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/SubjectIndividualObserver.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionInstructionListenerImpl.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionStatusListenerImpl.java create mode 100644 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PulseListenerImpl.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/ShardGroupStatusListenerImpl.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/StoreStatusListenerImpl.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/SDConfigAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/AuthenticationConfigurer.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/RestAuthentication.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/MetaServiceGrpcImpl.java create mode 100644 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/SDConfigService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/GrpcAuthentication.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/RedirectInterceptor.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/TokenUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java create mode 100644 
hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java create mode 100644 hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java create mode 100644 hg-pd-service/src/test/java/live/PDServer0.java create mode 100644 hg-pd-service/src/test/java/live/PDServer1.java create mode 100644 hg-pd-service/src/test/java/live/PDServer13.java create mode 100644 hg-pd-service/src/test/java/live/PDServer2.java create mode 100644 hg-pd-service/src/test/java/live/PDServer3.java create mode 100644 hg-pd-service/src/test/resources/application-server1.yml create mode 100644 hg-pd-service/src/test/resources/application-server13.yml create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/BaseTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/BaseCliToolsTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/CliToolsSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/MainTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/ChangingLeader.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDPulseTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDWatchTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java create mode 100644 
hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java create mode 100644 
hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java create mode 100644 hg-pd-test/src/main/java/org/apache/hugegraph/pd/test/HgPDTestUtil.java delete mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java delete mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/cluster_op.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/common.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/meta.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pulse.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/store_group.proto create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/watch.proto delete mode 100644 hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java create mode 100644 start_pd_server.sh create mode 100644 start_store_server.sh diff --git a/conf/hugegraph.license b/conf/hugegraph.license new file mode 100644 index 0000000000000000000000000000000000000000..3cc0c344b4a6c13836715fb5ba5b4b14749a24a6 GIT binary patch literal 848 zcmV-W1F!sHhn&I!I)GnwHK;#FaR|4FN4%n_lEcO%-pD`UE$4 z;}s8Uc0vW;3h8Db>Ut5-RbDLDFg4sj+R1}DC@hY_76QKi-1E@=2sXEFS4T*78c^SV z;JwjN3~NucPY5@MO0Dptt3WmuL zvLdjwz)(1zbffoKZ<*L6@x0=h@f}gk&EsRw<1VG!*N5-I@TBGOGeHlvFY;EY z92byd^2&`ZhdrK?UR^s6Nsut(Lm)~**LQi*%)Hdf{yL!^$DUl92wViz_}K7xBsv8m zr_d)pw5k#!C3!={-c16PI$O)?0fdkBJ$mg1d&zY--|5eJYoIou0N_i7R`8)*x*wmZ zv-vnKj!vCg@gJ%q-sId;{wH+RI298*As9_V>gPOHR!-(HolCt-)C{?yG--Q>n|Fn> z&T#y@sK^bQOxk!SJXC4rX5+`-KBil3N+ + + + hugegraph-pd + 
org.apache.hugegraph + ${revision} + + 4.0.0 + + hg-pd-cli + + + 2.12.1 + + + + org.apache.hugegraph + hg-pd-client + ${project.version} + + + junit + junit + ${junit.version} + test + + + com.alipay.sofa + jraft-core + ${jraft-core.version} + + + org.rocksdb + rocksdbjni + + + com.google.protobuf + protobuf-java + + + + + + + + + + + + + + + org.yaml + snakeyaml + test + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + package + + single + + + + + + org.apache.hugegraph.pd.cli.CliApplication + + + + + jar-with-dependencies + + + + + + + + \ No newline at end of file diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java new file mode 100644 index 0000000000..33b0c50be3 --- /dev/null +++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/CliApplication.java @@ -0,0 +1,40 @@ +package org.apache.hugegraph.pd.cli; + +import org.apache.hugegraph.pd.cli.cmd.ChangeRaft; +import org.apache.hugegraph.pd.cli.cmd.Command; +import org.apache.hugegraph.pd.cli.cmd.Config; +import org.apache.hugegraph.pd.cli.cmd.Parameter; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class CliApplication { + + public static void main(String[] args) { + try { + Parameter parameter = Command.toParameter(args); + Command command; + switch (parameter.getCmd()) { + case "config": + command = new Config(parameter.getPd()); + break; + case "change_raft": + command = new ChangeRaft(parameter.getPd()); + break; +// case "check_peers": +// command = new CheckPeers(parameter.getPd()); +// break; + default: + log.error("无效的指令"); + return; + } + command.action(parameter.getParams()); + } catch (Exception e) { + log.error("main thread error:", e); + System.exit(0); + } finally { + + } + + } +} diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java 
b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java new file mode 100644 index 0000000000..6c37f76594 --- /dev/null +++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/ChangeRaft.java @@ -0,0 +1,19 @@ +package org.apache.hugegraph.pd.cli.cmd; + +import org.apache.hugegraph.pd.common.PDException; + +/** + * @author zhangyingjie + * @date 2023/10/17 + **/ +public class ChangeRaft extends Command { + + public ChangeRaft(String pd) { + super(pd); + } + + @Override + public void action(String[] params) throws PDException { + pdClient.updatePdRaft(params[0]); + } +} diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java new file mode 100644 index 0000000000..a04fb3c00f --- /dev/null +++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Command.java @@ -0,0 +1,54 @@ +package org.apache.hugegraph.pd.cli.cmd; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.common.PDException; + +/** + * @author zhangyingjie + * @date 2023/10/17 + **/ +public abstract class Command { + + protected static String error = "启动参数: 命令, pd地址, 命令参数, 参数分隔符(非必须)"; + protected PDClient pdClient; + protected PDConfig config; + + public Command(String pd) { + config = PDConfig.of(pd).setAuthority("store", ""); + pdClient = PDClient.create(config); + } + + public static Parameter toParameter(String[] args) throws PDException { + if (args.length < 2) { + throw new PDException(-1, error); + } + Parameter parameter = new Parameter(); + parameter.setCmd(args[0]); + parameter.setPd(args[1]); + + if (args.length == 2) { + parameter.setParams(new String[0]); + return parameter; + } + + if (args.length == 4) { + // 之前的逻辑,存在一个分隔符,做兼容 + String t = args[3]; + if (t != null && !t.isEmpty() && args[2].contains(t)) { + parameter.setParams(args[2].split(t)); + parameter.setSeparator(t); + return 
parameter; + } + } + + // 剩余的部分放到 params中 + String[] params = new String[args.length - 2] ; + System.arraycopy(args, 2, params, 0, args.length - 2); + parameter.setParams(params); + + return parameter; + } + + public abstract void action(String[] params) throws Exception; +} diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java new file mode 100644 index 0000000000..0e75ec9a07 --- /dev/null +++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Config.java @@ -0,0 +1,49 @@ +package org.apache.hugegraph.pd.cli.cmd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; + +/** + * @author zhangyingjie + * @date 2023/10/17 + **/ +public class Config extends Command { + + public Config(String pd) { + super(pd); + } + + @Override + public void action(String[] params) throws PDException { + String param = params[0]; + String[] pair = param.split("="); + String key = pair[0].trim(); + Object value = null; + if (pair.length > 1) { + value = pair[1].trim(); + } + if (value == null) { + Metapb.PDConfig pdConfig = pdClient.getPDConfig(); + switch (key) { + case "enableBatchLoad": + // value = pdConfig.getEnableBatchLoad(); + break; + case "shardCount": + value = pdConfig.getShardCount(); + break; + } + + System.out.println("Get config " + key + "=" + value); + } else { + Metapb.PDConfig.Builder builder = Metapb.PDConfig.newBuilder(); + switch (key) { + case "enableBatchLoad": + // builder.setEnableBatchLoad(Boolean.valueOf((String)value)); + case "shardCount": + builder.setShardCount(Integer.valueOf((String) value)); + } + pdClient.setPDConfig(builder.build()); + System.out.println("Set config " + key + "=" + value); + } + } +} diff --git a/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java new file mode 100644 index 0000000000..acbeca2152 
--- /dev/null +++ b/hg-pd-cli/src/main/java/org/apache/hugegraph/pd/cli/cmd/Parameter.java @@ -0,0 +1,15 @@ +package org.apache.hugegraph.pd.cli.cmd; + +import lombok.Data; + +/** + * @author zhangyingjie + * @date 2023/10/20 + **/ +@Data +public class Parameter { + String cmd; + String pd; + String[] params; + String separator; +} diff --git a/hg-pd-cli/src/main/resources/log4j2.xml b/hg-pd-cli/src/main/resources/log4j2.xml new file mode 100644 index 0000000000..9a045c7500 --- /dev/null +++ b/hg-pd-cli/src/main/resources/log4j2.xml @@ -0,0 +1,122 @@ + + + + + + logs + hugegraph-pd + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java b/hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java new file mode 100644 index 0000000000..f7679c76ce --- /dev/null +++ b/hg-pd-cli/src/test/java/org/apache/hugegraph/pd/cli/CliTest.java @@ -0,0 +1,61 @@ +package org.apache.hugegraph.pd.cli; + +import org.apache.hugegraph.pd.common.PDException; +// import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +public class CliTest { + // @Test + public void getConfig() throws PDException { + CliApplication.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad"}); + } + // @Test + public void setBatchTrue() throws PDException { + CliApplication.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad= true "}); + } + + // @Test + public void setBatchFalse() throws PDException { + CliApplication.main(new String[]{"127.0.0.1:8686", "config", "enableBatchLoad=false"}); + } + + // @Test + public void getConfig2() throws PDException { + CliApplication.main(new String[]{"127.0.0.1:8686", "config", "shardCount"}); + } + // @Test + public void setShardCount1() throws PDException { + CliApplication.main(new 
String[]{"127.0.0.1:8686", "config", "shardCount=1"}); + } + + // @Test + public void setShardCount3() throws PDException { + CliApplication.main(new String[]{"127.0.0.1:8686", "config", "shardCount=3"}); + } + + // @Test + public void test2(){ + Integer[] a = new Integer[] { 1, 0, 3, 2}; + List aa = Arrays.asList(a); + System.out.printf(test2sup(aa, aa.size(),0)?"TRUE":"FALSE"); + } + public static boolean test2sup (List arrays, int tail, int res) { + System.out.println(String.format("%d %d", tail, res)); + if (tail == 0) { + System.out.println(String.format("a = %d %d", tail, res)); + return false; + } else if(tail == 1) { + System.out.println(String.format("b = %d %d", arrays.get(0), res)); + return (arrays.get(0) == res); + } else if(tail == 2) { + System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res)); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java new file mode 100644 index 0000000000..c75b1755d1 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/BaseClient.java @@ -0,0 +1,96 @@ +package org.apache.hugegraph.pd.client; + +import java.io.Closeable; +import java.util.function.Function; +import java.util.function.Predicate; + +import org.apache.hugegraph.pd.client.listener.LeaderChangeListener; +import org.apache.hugegraph.pd.client.rpc.AnyInvoker; +import org.apache.hugegraph.pd.client.rpc.ConnectionManager; +import org.apache.hugegraph.pd.client.rpc.ConnectionManagers; +import org.apache.hugegraph.pd.client.rpc.Invoker; +import org.apache.hugegraph.pd.client.rpc.LeaderInvoker; +import org.apache.hugegraph.pd.common.KVPair; 
+import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.RequestHeader; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; + +import io.grpc.Channel; +import io.grpc.MethodDescriptor; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2023/12/20 + */ +@Slf4j +public abstract class BaseClient implements Closeable, LeaderChangeListener { + + public static final ResponseHeader OK_HEADER = + ResponseHeader.newBuilder().setError(Errors.newBuilder().setType(ErrorType.OK)).build(); + protected final RequestHeader header = RequestHeader.getDefaultInstance(); + @Getter + private final PDConfig config; + @Getter + private final ConnectionManager cm; + @Getter + private final Invoker leaderInvoker; + @Getter + private final Invoker anyInvoker; + private final Function asCreator; + private final Function bsCreator; + + protected BaseClient(PDConfig pdConfig, Function asCreator, + Function bsCreator) { + this.config = pdConfig; + this.cm = ConnectionManagers.getInstance().add(pdConfig); + this.cm.addClient(this); + this.asCreator = asCreator; + this.bsCreator = bsCreator; + this.leaderInvoker = new LeaderInvoker(this.cm, asCreator, bsCreator); + this.anyInvoker = new AnyInvoker(this.cm, asCreator, bsCreator); + } + + public ResponseHeader createErrorHeader(int errorCode, String errorMsg) { + return ResponseHeader.newBuilder() + .setError(Errors.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)) + .build(); + } + + public void handleErrors(ResponseHeader header) throws PDException { + Errors error = header.getError(); + if (header.hasError() && error.getType() != ErrorType.OK) { + throw new PDException(error.getTypeValue(), + String.format("PD request error, error code = %d, msg = %s", + 
Integer.valueOf(error.getTypeValue()), error.getMessage())); + } + } + + public String getLeaderAddress() { + return this.cm.getLeader(); + } + + protected RespT blockingUnaryCall(MethodDescriptor method, ReqT req) throws + PDException { + return this.leaderInvoker.blockingCall(method, req); + } + + protected RespT blockingUnaryCall(MethodDescriptor method, ReqT req, + long timeout) throws PDException { + return this.leaderInvoker.blockingCall(method, req, timeout); + } + + protected KVPair concurrentBlockingUnaryCall( + MethodDescriptor method, ReqT req, Predicate predicate) throws PDException { + RespT t = this.anyInvoker.blockingCall(method, req, predicate); + return new KVPair(Boolean.valueOf((t != null)), t); + } + + public void close() { + this.cm.removeClient(this); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java new file mode 100644 index 0000000000..7c2c769326 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java @@ -0,0 +1,402 @@ +package org.apache.hugegraph.pd.client; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; + +import org.apache.hugegraph.pd.client.impl.PDApi; +import lombok.Setter; +import org.apache.commons.collections4.CollectionUtils; + +import org.apache.hugegraph.pd.client.rpc.ConnectionClient; +import org.apache.hugegraph.pd.common.GraphCache; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import 
org.apache.hugegraph.pd.grpc.watch.WatchShardGroupResponse; +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; +import org.apache.hugegraph.pd.watch.Watcher; +import com.google.common.collect.RangeMap; + +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_OFFLINE; + +@Slf4j +public class ClientCache { + + private final Watcher watcher; + private volatile Map> groups; + private volatile Map stores; + private volatile Map caches = new ConcurrentHashMap<>(); + private AtomicBoolean initialized = new AtomicBoolean(false); + @Getter + private ConnectionClient client; + + @Setter + private PDApi pdApi; + + public ClientCache(ConnectionClient client, Watcher watcher) { + this.groups = new ConcurrentHashMap<>(); + this.stores = new ConcurrentHashMap<>(); + this.client = client; + this.watcher = watcher; + this.watcher.watchPartition(this::watchPartition); + this.watcher.watchShardGroup(this::watchShardGroup); + this.watcher.watchNode(this::watchNode); + } + + private void watchPartition(PartitionEvent response) { + invalidPartitionCache(response.getGraph(), response.getPartitionId()); + if (response.getChangeType() == PartitionEvent.ChangeType.DEL) { + removeAll(response.getGraph()); + } + } + + private void watchShardGroup(WatchResponse response) { + WatchShardGroupResponse shardResponse = response.getShardGroupResponse(); + switch (shardResponse.getType()) { + case WATCH_CHANGE_TYPE_DEL: + deleteShardGroup(shardResponse.getShardGroupId()); + break; + case WATCH_CHANGE_TYPE_ALTER: + case WATCH_CHANGE_TYPE_ADD: + updateShardGroup(response.getShardGroupResponse().getShardGroup()); + break; + } + } + + private void watchNode(NodeEvent response) { + if (response.getEventType() == NODE_OFFLINE) { + invalidStoreCache(response.getNodeId()); + } else { + // update store, 不更新缓存,会造成 getLeaderStoreAddresses的返回结果 + try { + 
pdApi.getStore(response.getNodeId()); + } catch (PDException e) { + log.error("getStore exception", e); + } + } + } + + private void invalidStoreCache(long storeId) { + removeStore(Long.valueOf(storeId)); + } + + private void invalidPartitionCache(String graphName, int partitionId) { + if (null != getPartitionById(graphName, partitionId)) { + removePartition(graphName, partitionId); + } + } + + private GraphCache getGraphCache(String graphName) { + GraphCache graph; + if ((graph = this.caches.get(graphName)) == null) { + synchronized (this.caches) { + if ((graph = this.caches.get(graphName)) == null) { + Metapb.Graph.Builder builder = Metapb.Graph.newBuilder().setGraphName(graphName); + Metapb.Graph g = builder.build(); + graph = new GraphCache(g); + this.caches.put(graphName, graph); + } + } + } + return graph; + } + + public KVPair getPartitionById(String graphName, int partId) { + try { + GraphCache graph = initGraph(graphName); + Metapb.Partition partition = graph.getPartition(partId); + if (partition == null || !this.groups.containsKey(partId)) { + return null; + } + Metapb.Shard shard = this.groups.get(Integer.valueOf(partId)).getValue(); + if (shard == null) { + return null; + } + return new KVPair(partition, shard); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private KVPair getPair(int partId, GraphCache graph) { + Metapb.Partition p = graph.getPartition(Integer.valueOf(partId)); + KVPair pair = this.groups.get(Integer.valueOf(partId)); + if (p != null && pair != null) { + Metapb.Shard s = pair.getValue(); + if (s == null) { + pair.setValue(getLeader(partId)); + return new KVPair(p, pair.getValue()); + } + return new KVPair(p, s); + } + return null; + } + + public KVPair getPartitionByCode(String graphName, long code) { + try { + GraphCache graph = initGraph(graphName); + RangeMap range = graph.getRange(); + Integer pId = range.get(Long.valueOf(code)); + if (pId != null) { + return getPair(pId.intValue(), graph); + } + 
ReentrantReadWriteLock.ReadLock readLock = graph.getLock().readLock(); + try { + readLock.lock(); + pId = range.get(Long.valueOf(code)); + } catch (Exception e) { + log.info("get range with error:", e); + } finally { + readLock.unlock(); + } + if (pId == null) { + ReentrantReadWriteLock.WriteLock writeLock = graph.getLock().writeLock(); + try { + writeLock.lock(); + if ((pId = range.get(Long.valueOf(code))) == null) { + graph.reset(); + initGraph(graph); + pId = range.get(Long.valueOf(code)); + } + } catch (Exception e) { + log.info("reset with error:", e); + } finally { + writeLock.unlock(); + } + } + if (pId != null) { + return getPair(pId.intValue(), graph); + } + return null; + } catch (PDException e) { + throw new RuntimeException(e); + } + } + + private GraphCache initGraph(String graphName) throws PDException { + initCache(); + GraphCache graph = getGraphCache(graphName); + if (!graph.getInitialized().get()) { + synchronized (graph) { + if (!graph.getInitialized().get()) { + initGraph(graph); + graph.getInitialized().set(true); + } + } + } + return graph; + } + + private void initGraph(GraphCache graph) throws PDException { + Pdpb.CachePartitionResponse pc = this.client.getPartitionCache(graph.getGraph().getGraphName()); + List ps = pc.getPartitionsList(); + if (!CollectionUtils.isEmpty(ps)) { + graph.init(ps); + } + } + + private void initCache() throws PDException { + if (!this.initialized.get()) { + synchronized (this) { + if (!this.initialized.get()) { + Pdpb.CacheResponse cache = this.client.getClientCache(); + List shardGroups = cache.getShardsList(); + for (Metapb.ShardGroup s : shardGroups) { + this.groups.put(Integer.valueOf(s.getId()), new KVPair(s, getLeader(s))); + } + List stores = cache.getStoresList(); + for (Metapb.Store store : stores) { + this.stores.put(Long.valueOf(store.getId()), store); + } + List graphs = cache.getGraphsList(); + for (Metapb.Graph g : graphs) { + GraphCache c = new GraphCache(g); + this.caches.put(g.getGraphName(), c); 
+ } + this.initialized.set(true); + } + } + } + } + + public KVPair getPartitionByKey(String graphName, byte[] key) { + int code = PartitionUtils.calcHashcode(key); + return getPartitionByCode(graphName, code); + } + + public boolean update(String graphName, int partId, Metapb.Partition partition) { + GraphCache graph = getGraphCache(graphName); + return graph.updatePartition(partition); + } + + public void removePartition(String graphName, int partId) { + GraphCache graph = getGraphCache(graphName); + graph.removePartition(Integer.valueOf(partId)); + } + + public void removePartitions() { + try { + this.groups.clear(); + this.stores.clear(); + this.caches.clear(); + this.initialized.set(false); + initCache(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private void removePartitions(GraphCache graph) { + try { + graph.removePartitions(); + initGraph(graph.getGraph().getGraphName()); + } catch (Exception e) { + log.warn("remove partitions with error:", e); + } finally { + } + } + + public void removeAll(String graphName) { + GraphCache graph = this.caches.get(graphName); + if (graph != null) { + removePartitions(graph); + } + } + + private StringBuffer getStack(StackTraceElement[] stackTrace) { + StringBuffer sb = new StringBuffer(); + for (int i = 0; i < stackTrace.length; i++) { + StackTraceElement element = stackTrace[i]; + sb.append(element.toString() + "\n"); + } + return sb; + } + + public boolean updateShardGroup(Metapb.ShardGroup shardGroup) { + KVPair old = this.groups.get(Integer.valueOf(shardGroup.getId())); + Metapb.Shard leader = getLeader(shardGroup); + if (old != null) { + old.setKey(shardGroup); + old.setValue(leader); + return false; + } + this.groups.put(Integer.valueOf(shardGroup.getId()), new KVPair(shardGroup, leader)); + return true; + } + + public void deleteShardGroup(int shardGroupId) { + this.groups.remove(Integer.valueOf(shardGroupId)); + } + + public Metapb.ShardGroup getShardGroup(int groupId) { + KVPair pair = 
this.groups.get(Integer.valueOf(groupId)); + if (pair != null) { + return pair.getKey(); + } + return null; + } + + public boolean addStore(Long storeId, Metapb.Store store) { + Metapb.Store oldStore = this.stores.get(storeId); + if (oldStore != null && oldStore.equals(store)) { + return false; + } + this.stores.put(storeId, store); + return true; + } + + public Metapb.Store getStoreById(Long storeId) { + return this.stores.get(storeId); + } + + public void removeStore(Long storeId) { + this.stores.remove(storeId); + } + + public void reset() { + this.groups = new ConcurrentHashMap<>(); + this.stores = new ConcurrentHashMap<>(); + this.caches = new ConcurrentHashMap<>(); + this.initialized.set(false); + } + + public Metapb.Shard getLeader(int partitionId) { + KVPair pair = this.groups.get(Integer.valueOf(partitionId)); + if (pair != null) { + if (pair.getValue() != null) { + return pair.getValue(); + } + for (Metapb.Shard shard : pair.getKey().getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + pair.setValue(shard); + return shard; + } + } + } + return null; + } + + public Metapb.Shard getLeader(Metapb.ShardGroup shardGroup) { + if (shardGroup != null) { + for (Metapb.Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + return shard; + } + } + } + return null; + } + + public void updateLeader(int partitionId, Metapb.Shard leader) { + KVPair pair = this.groups.get(partitionId); + if (pair != null && leader != null) { + Metapb.Shard l = pair.getValue(); + if (l == null || leader.getStoreId() != l.getStoreId()) { + Metapb.ShardGroup shardGroup = pair.getKey(); + synchronized (shardGroup) { + l = pair.getValue(); + if (l == null || leader.getStoreId() != l.getStoreId()) { + log.info("Change leader of partition {} from {} to {}", partitionId, l.getStoreId(), + leader.getStoreId()); + Metapb.ShardGroup.Builder builder = + Metapb.ShardGroup.newBuilder(shardGroup).clearShards(); + for (Metapb.Shard shard : 
shardGroup.getShardsList()) { + builder.addShards( + Metapb.Shard.newBuilder() + .setStoreId(shard.getStoreId()) + .setRole((shard.getStoreId() == leader.getStoreId()) ? + Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) + .build()); + } + pair.setKey(builder.build()); + pair.setValue(leader); + } + } + } + } + } + + public List getLeaderStoreAddresses() throws PDException { + initCache(); + Set storeIds = + this.groups.values().stream().map(shardGroupShardKVPair -> shardGroupShardKVPair.getValue() + .getStoreId()) + .collect(Collectors.toSet()); + return this.stores.values().stream() + .filter(store -> storeIds.contains(Long.valueOf(store.getId()))) + .map(Metapb.Store::getAddress) + .collect(Collectors.toList()); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java new file mode 100644 index 0000000000..0f1ca08a2b --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java @@ -0,0 +1,18 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; + +import java.util.Map; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +public interface Discoverable { + + NodeInfos getNodeInfos(Query query); + + void scheduleTask(); + void cancelTask(); +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java new file mode 100644 index 0000000000..9d84a8fcc9 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java @@ -0,0 +1,121 @@ +package org.apache.hugegraph.pd.client; + +import java.io.Closeable; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import 
java.util.function.Consumer; + +import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +@Slf4j +public abstract class DiscoveryClient extends BaseClient implements Closeable, Discoverable { + + protected int period; //心跳周期 + ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private Timer timer = new Timer("hg-pd-c-serverHeartbeat", true); + private long registerTimeout = 30000; + private long lockTimeout = 5; + private TimerTask task = new TimerTask() { + @Override + public void run() { + boolean locked = false; + try { + locked = readWriteLock.readLock().tryLock(lockTimeout, TimeUnit.SECONDS); + if (locked) { + NodeInfo nodeInfo = getRegisterNode(); + RegisterInfo register; + register = getLeaderInvoker().blockingCall(DiscoveryServiceGrpc.getRegisterMethod(), + nodeInfo, registerTimeout); + Consumer consumer = getRegisterConsumer(); + if (consumer != null) { + try { + consumer.accept(register); + } catch (Exception e) { + log.warn("run consumer when heartbeat with error:", e); + } + } + } + } catch (Exception e) { + log.error("register with error:", e); + } finally { + if (locked) { + readWriteLock.readLock().unlock(); + } + } + } + }; + + public DiscoveryClient(int delay, PDConfig conf) { + super(conf, DiscoveryServiceGrpc::newStub, DiscoveryServiceGrpc::newBlockingStub); + this.period = delay; + if (this.period > 60000) { + this.registerTimeout = this.period / 2; + } + } + + + /*** + * 获取注册节点信息 + * @param query + * @return + */ + @Override + public NodeInfos getNodeInfos(Query query) { + this.readWriteLock.readLock().lock(); + NodeInfos nodes = null; + try { + nodes = 
getLeaderInvoker().blockingCall(DiscoveryServiceGrpc.getGetNodesMethod(), query);
+        } catch (Exception e) {
+            log.error("Failed to invoke [ getNodeInfos ], query: {} ", query, e);
+        } finally {
+            this.readWriteLock.readLock().unlock();
+        }
+        return nodes;
+    }
+
+    /***
+     * 启动心跳任务
+     */
+    @Override
+    public void scheduleTask() {
+        timer.scheduleAtFixedRate(task, 0, period);
+    }
+
+    abstract NodeInfo getRegisterNode();
+
+    abstract Consumer<RegisterInfo> getRegisterConsumer();
+
+    @Override
+    public void cancelTask() {
+        this.timer.cancel();
+    }
+
+    @Override
+    public void onLeaderChanged(String leader) {
+    }
+
+    @Override
+    public void close() {
+        this.timer.cancel();
+        readWriteLock.writeLock().lock();
+        try {
+            super.close();
+        } catch (Exception e) {
+            log.info("Close channel with error : {}.", e);
+        } finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
new file mode 100644
index 0000000000..89f7f69ea3
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterType;
+
+@Useless("discovery related")
+public class DiscoveryClientImpl extends DiscoveryClient {
+
+    private final String id;
+    private final RegisterType type;
+    private final String version;
+    private final String appName;
+    private final int times;
+    private final String address;
+    private final Map<String, String> labels;
+    private final Consumer<RegisterInfo> registerConsumer;
+
+    private DiscoveryClientImpl(Builder builder) {
+        super(builder.delay, builder.conf);
+        period = builder.delay;
+        id = builder.id;
+        type = builder.type;
+        version = builder.version;
+        appName = builder.appName;
+        times = builder.times;
+        address = builder.address;
+        labels = builder.labels;
+        registerConsumer = builder.registerConsumer;
+    }
+
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    @Override
+    NodeInfo getRegisterNode() {
+        return NodeInfo.newBuilder().setAddress(this.address)
+                       .setVersion(this.version)
+                       .setAppName(this.appName).setInterval(this.period)
+                       .setId(this.id).putAllLabels(labels).build();
+    }
+
+    @Override
+    Consumer<RegisterInfo> getRegisterConsumer() {
+        return registerConsumer;
+    }
+
+    public static final class Builder {
+
+        private int delay;
+        private String centerAddress;
+        private String id;
+        private RegisterType type;
+        private String address;
+        private Map<String, String> labels;
+        private String version;
+        private String appName;
+        private int times;
+        private Consumer<RegisterInfo> registerConsumer;
+        private PDConfig conf;
+
+        private Builder() {
+        }
+
+        public Builder setDelay(int val) {
+            delay = val;
+            return this;
+        }
+
+        public Builder setCenterAddress(String val) {
+            centerAddress = val;
+            return this;
+        }
+
+        public Builder setId(String val) {
+            id = val;
+            return this;
+        }
+
+        public Builder setType(RegisterType val) {
+            type = val;
+            return this;
+        }
+
+        public Builder setAddress(String val) {
+            address = val;
+            return this;
+        }
+
+        public Builder setLabels(Map<String, String> val) {
+            labels = val;
+            return this;
+        }
+
+        public Builder setVersion(String val) {
+            version = val;
+            return this;
+        }
+
+        public Builder setAppName(String val) {
+            appName = val;
+            return this;
+        }
+
+        public 
Builder setTimes(int val) { + times = val; + return this; + } + + public Builder setPdConfig(PDConfig val) { + this.conf = val; + return this; + } + + public Builder setRegisterConsumer(Consumer registerConsumer) { + this.registerConsumer = registerConsumer; + return this; + } + + public DiscoveryClientImpl build() { + return new DiscoveryClientImpl(this); + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java new file mode 100644 index 0000000000..3a225b44e0 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java @@ -0,0 +1,314 @@ +package org.apache.hugegraph.pd.client; + +import java.io.Closeable; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.client.impl.StreamDelegator; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.kv.K; +import org.apache.hugegraph.pd.grpc.kv.KResponse; +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.KvResponse; +import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +import org.apache.hugegraph.pd.grpc.kv.LockRequest; +import org.apache.hugegraph.pd.grpc.kv.LockResponse; +import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchEvent; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchRequest; +import 
org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchType; + +import io.grpc.MethodDescriptor; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2022/6/20 + **/ +@Slf4j +public class KvClient extends BaseClient implements Closeable { + + private static String keyListenPrefix = "K-"; + private static String prefixListenPrefix = "P-"; + private static String delegatorPrefix = "KV-"; + private AtomicLong clientId = new AtomicLong(0L); + private Semaphore semaphore = new Semaphore(1); + private AtomicBoolean closed = new AtomicBoolean(false); + private ConcurrentMap delegators = new ConcurrentHashMap<>(); + + public KvClient(PDConfig pdConfig) { + super(pdConfig, KvServiceGrpc::newStub, KvServiceGrpc::newBlockingStub); + } + + public KvResponse put(String key, String value) throws PDException { + Kv kv = Kv.newBuilder().setKey(key).setValue(value).build(); + KvResponse response = blockingUnaryCall(KvServiceGrpc.getPutMethod(), kv); + handleErrors(response.getHeader()); + return response; + } + + public KResponse get(String key) throws PDException { + K k = K.newBuilder().setKey(key).build(); + KResponse response = blockingUnaryCall(KvServiceGrpc.getGetMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + public KvResponse delete(String key) throws PDException { + K k = K.newBuilder().setKey(key).build(); + KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeleteMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + public KvResponse deletePrefix(String prefix) throws PDException { + K k = K.newBuilder().setKey(prefix).build(); + KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeletePrefixMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + public ScanPrefixResponse scanPrefix(String prefix) throws PDException { + K k = K.newBuilder().setKey(prefix).build(); + ScanPrefixResponse response = 
blockingUnaryCall(KvServiceGrpc.getScanPrefixMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + public TTLResponse keepTTLAlive(String key) throws PDException { + TTLRequest request = TTLRequest.newBuilder().setKey(key).build(); + TTLResponse response = blockingUnaryCall(KvServiceGrpc.getKeepTTLAliveMethod(), request); + handleErrors(response.getHeader()); + return response; + } + + public TTLResponse putTTL(String key, String value, long ttl) throws PDException { + TTLRequest request = TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build(); + TTLResponse response = blockingUnaryCall(KvServiceGrpc.getPutTTLMethod(), request); + handleErrors(response.getHeader()); + return response; + } + + private void onEvent(WatchResponse value, Consumer consumer) { + // log.info("receive message for {},event Count:{}", value.getState(), value.getEventsCount()); + this.clientId.compareAndSet(0L, value.getClientId()); + if (value.getEventsCount() != 0) { + try { + consumer.accept((T) value); + } catch (Exception e) { + log.info( + "an error occurred while executing the client callback method, which should not " + + "have happened.Please check the callback method of the client", + e); + } + } + } + + public void listen(String key, Consumer consumer) throws PDException { + acquire(); + try { + StreamDelegator delegator = createDelegator(keyListenPrefix + key, + KvServiceGrpc.getWatchMethod()); + delegator.listen(getWatchRequest(key), getStreamDataHandler(key, consumer)); + } catch (Exception e) { + release(); + throw new PDException(ErrorType.PD_UNAVAILABLE, e); + } + } + + public void listenPrefix(String prefix, Consumer consumer) throws PDException { + acquire(); + try { + StreamDelegator delegator = createDelegator(prefixListenPrefix + prefix, + KvServiceGrpc.getWatchPrefixMethod()); + delegator.listen(getWatchRequest(prefix), getStreamDataHandler(prefix, consumer)); + } catch (Exception e) { + release(); + throw new 
PDException(ErrorType.PD_UNAVAILABLE, e); + } + } + + private void acquire() { + if (this.clientId.get() == 0L) { + try { + this.semaphore.acquire(); + if (this.clientId.get() != 0L) { + this.semaphore.release(); + } else { + log.info("wait for client starting...."); + } + } catch (Exception e) { + log.error("get semaphore with error:", e); + } + } + } + + private void release() { + try { + if (this.semaphore.availablePermits() == 0) { + this.semaphore.release(); + log.info("listen finished"); + } + } catch (Exception e) { + log.warn("release failed:", e); + } + } + + public List getWatchList(T response) { + List values = new LinkedList<>(); + List eventsList = response.getEventsList(); + for (WatchEvent event : eventsList) { + if (event.getType() != WatchType.Put) { + return null; + } + String value = event.getCurrent().getValue(); + values.add(value); + } + return values; + } + + public Map getWatchMap(T response) { + Map values = new HashMap<>(); + List eventsList = response.getEventsList(); + for (WatchEvent event : eventsList) { + if (event.getType() != WatchType.Put) { + return null; + } + WatchKv current = event.getCurrent(); + String key = current.getKey(); + String value = current.getValue(); + values.put(key, value); + } + return values; + } + + public LockResponse lock(String key, long ttl) throws PDException { + LockResponse response; + acquire(); + try { + LockRequest k = + LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).setTtl(ttl).build(); + response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k); + handleErrors(response.getHeader()); + this.clientId.compareAndSet(0L, response.getClientId()); + } catch (Exception e) { + throw e; + } finally { + release(); + } + return response; + } + + public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException { + LockResponse response; + acquire(); + try { + LockRequest k = + LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).setTtl(ttl).build(); 
+ response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k); + handleErrors(response.getHeader()); + this.clientId.compareAndSet(0L, response.getClientId()); + } catch (Exception e) { + throw e; + } finally { + release(); + } + return response; + } + + public LockResponse isLocked(String key) throws PDException { + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getIsLockedMethod(), k); + handleErrors(response.getHeader()); + return response; + } + + public LockResponse unlock(String key) throws PDException { + assert this.clientId.get() != 0L; + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getUnlockMethod(), k); + handleErrors(response.getHeader()); + this.clientId.compareAndSet(0L, response.getClientId()); + assert this.clientId.get() == response.getClientId(); + return response; + } + + public LockResponse keepAlive(String key) throws PDException { + assert this.clientId.get() != 0L; + LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(this.clientId.get()).build(); + LockResponse response = blockingUnaryCall(KvServiceGrpc.getKeepAliveMethod(), k); + handleErrors(response.getHeader()); + this.clientId.compareAndSet(0L, response.getClientId()); + assert this.clientId.get() == response.getClientId(); + return response; + } + + public void close() { + this.delegators.entrySet().forEach(d -> d.getValue().close()); + this.delegators.clear(); + this.closed.set(true); + super.close(); + } + + private Consumer getStreamDataHandler(String key, Consumer consumer) { + return value -> { + boolean b; + switch (value.getState()) { + case Starting: + b = this.clientId.compareAndSet(0L, value.getClientId()); + if (b) { + log.info("set watch client id to :{}", Long.valueOf(value.getClientId())); + } + release(); + break; + case Started: 
+                    onEvent(value, consumer);
+                    break;
+                case Leader_Changed:
+                    this.clientId.set(0L);
+                    release();
+                    onLeaderChanged("");
+                    break;
+            }
+        };
+    }
+
+    private void onDelegatorError(Throwable t) {
+        release();
+        if (!this.closed.get()) {
+            this.clientId.set(0L);
+        }
+    }
+
+    private WatchRequest getWatchRequest(String key) {
+        return WatchRequest.newBuilder().setClientId(this.clientId.get()).setKey(key).build();
+    }
+
+    private StreamDelegator createDelegator(String name,
+                                            MethodDescriptor methodDesc) {
+        StreamDelegator delegator =
+                new StreamDelegator(delegatorPrefix + name, getLeaderInvoker(), methodDesc);
+        this.delegators.put(delegator.getName(), delegator);
+        return delegator;
+    }
+
+    public void onLeaderChanged(String leader) {
+        if (this.closed.get()) {
+            return;
+        }
+        this.delegators.entrySet().parallelStream().forEach(e -> e.getValue().reconnect());
+    }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
new file mode 100644
index 0000000000..710e985604
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.Useless;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.common.ErrorType;
+import org.apache.hugegraph.pd.grpc.common.ResponseHeader;
+
+import com.google.protobuf.ByteString;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Useless("license related")
+@Slf4j
+public class LicenseClient extends BaseClient {
+
+    public LicenseClient(PDConfig config) {
+        super(config, PDGrpc::newStub, PDGrpc::newBlockingStub);
+    }
+
+    public Pdpb.PutLicenseResponse putLicense(byte[] content) {
+        Pdpb.PutLicenseRequest request = Pdpb.PutLicenseRequest.newBuilder()
+                                                               .setContent(
+                                                                       ByteString.copyFrom(content))
+                                                               .build();
+        try {
+            KVPair<Boolean, Pdpb.PutLicenseResponse> pair = concurrentBlockingUnaryCall(
+                    PDGrpc.getPutLicenseMethod(), request,
+                    (rs) -> rs.getHeader().getError().getType().equals(ErrorType.OK));
+            if (pair.getKey()) {
+                Pdpb.PutLicenseResponse.Builder builder = Pdpb.PutLicenseResponse.newBuilder();
+                builder.setHeader(OK_HEADER);
+                return builder.build();
+            } else {
+                return pair.getValue();
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+            log.debug("put license with error:{} ", e);
+            ResponseHeader rh = createErrorHeader(ErrorType.LICENSE_ERROR_VALUE, e.getMessage());
+            return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build();
+        }
+    }
+
+    public void onLeaderChanged(String leader) {
+    }
+}
diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java
new file mode 100644
index 0000000000..2144260ee2
--- /dev/null
+++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/MetaClient.java
@@ -0,0 +1,139 @@
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.GraphSpaces;
+import org.apache.hugegraph.pd.grpc.Graphs;
+import org.apache.hugegraph.pd.grpc.MetaServiceGrpc;
+import org.apache.hugegraph.pd.grpc.Metapb.Graph;
+import org.apache.hugegraph.pd.grpc.Metapb.GraphSpace;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.Metapb.Store;
+import org.apache.hugegraph.pd.grpc.Partitions;
+import org.apache.hugegraph.pd.grpc.ShardGroups;
+import org.apache.hugegraph.pd.grpc.Stores;
+import org.apache.hugegraph.pd.grpc.common.NoArg;
+import org.apache.hugegraph.pd.grpc.common.VoidResponse;
+
+import java.io.Closeable;
+
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetGraphSpacesMethod;
+import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetGraphsMethod;
+import static 
org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetPartitionsMethod; +import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetShardGroupsMethod; +import static org.apache.hugegraph.pd.grpc.MetaServiceGrpc.getGetStoresMethod; + +/** + * @author zhangyingjie + * @date 2023/9/19 + **/ +public class MetaClient extends BaseClient implements Closeable { + + + public MetaClient(PDConfig config) { + super(config, MetaServiceGrpc::newStub, MetaServiceGrpc::newBlockingStub); + } + + /* @Override + protected AbstractStub createStub() { + return MetaServiceGrpc.newStub(channel); + } + + @Override + protected AbstractBlockingStub createBlockingStub() { + return MetaServiceGrpc.newBlockingStub(channel); + }*/ + + /** + * + */ + public Stores getStores() throws PDException { + Stores res = blockingUnaryCall(getGetStoresMethod(), NoArg.newBuilder().build()); + handleErrors(res.getHeader()); + return res; + } + + /** + * + */ + public Partitions getPartitions() throws PDException { + Partitions res = blockingUnaryCall(getGetPartitionsMethod(), NoArg.newBuilder().build()); + handleErrors(res.getHeader()); + return res; + } + + /** + * + */ + public ShardGroups getShardGroups() throws PDException { + ShardGroups res = blockingUnaryCall(getGetShardGroupsMethod(), NoArg.newBuilder().build()); + handleErrors(res.getHeader()); + return res; + } + + + /** + * + */ + public GraphSpaces getGraphSpaces() throws PDException { + GraphSpaces res = blockingUnaryCall(getGetGraphSpacesMethod(), NoArg.newBuilder().build()); + handleErrors(res.getHeader()); + return res; + } + + /** + * + */ + public Graphs getGraphs() throws PDException { + Graphs res = blockingUnaryCall(getGetGraphsMethod(), NoArg.newBuilder().build()); + handleErrors(res.getHeader()); + return res; + } + + /** + * + */ + public void updateStore(Store request) throws PDException { + VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateStoreMethod(), request); + handleErrors(res.getHeader()); + } + + /** + * + */ 
+ public void updatePartition(Partition request) throws PDException { + VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdatePartitionMethod(), request); + handleErrors(res.getHeader()); + } + + /** + * + */ + public void updateShardGroup(ShardGroup request) throws PDException { + VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateShardGroupMethod(), request); + handleErrors(res.getHeader()); + } + + /** + * + */ + public void updateGraphSpace(GraphSpace request) throws PDException { + VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateGraphSpaceMethod(), request); + handleErrors(res.getHeader()); + } + + /** + * + */ + public void updateGraph(Graph request) throws PDException { + VoidResponse res = blockingUnaryCall(MetaServiceGrpc.getUpdateGraphMethod(), request); + handleErrors(res.getHeader()); + } + + @Override + public void close() { + super.close(); + } + + public void onLeaderChanged(String leader) {} +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java new file mode 100644 index 0000000000..82927f9102 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -0,0 +1,685 @@ +package org.apache.hugegraph.pd.client; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; + +import java.util.List; + +import org.apache.hugegraph.pd.client.impl.PDApi; +import org.apache.hugegraph.pd.client.listener.PDEventListener; +import org.apache.hugegraph.pd.client.rpc.ConnectionManager; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.Partition; +import 
org.apache.hugegraph.pd.grpc.Metapb.Shard; +import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GraphStatsResponse; +import org.apache.hugegraph.pd.pulse.Pulse; +import org.apache.hugegraph.pd.watch.PDEventRaiser; +import org.apache.hugegraph.pd.watch.Watcher; + +import io.grpc.ManagedChannel; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +/** + * PD客户端实现类 + * + * @author yanjinbing + */ +@Slf4j +public class PDClient extends BaseClient { + + private final PDConfig config; + @Getter + private final ClientCache cache; + private final PulseClient pulse; + private final PDEventRaiser events; + @Getter + private final Watcher pdWatch; + private final PDApi pdApi; + private final ConnectionManager cm; + + PDClient(PDConfig config) { + super(config, PDGrpc::newStub, PDGrpc::newBlockingStub); + this.config = config; + this.cm = getCm(); + this.pulse = this.cm.getPulseClient(); + this.pdWatch = this.cm.getWatcher(); + this.cache = this.cm.getCache(); + this.pdApi = new PDApi(this, this.cache); + this.cache.setPdApi(this.pdApi); + this.events = new PDEventRaiser(this.pdWatch); + } + + /** + * 创建PDClient对象,并初始化stub + * + * @param config + * @return + */ + public static PDClient create(PDConfig config) { + PDClient client = new PDClient(config); + return client; + } + + + @Deprecated + public static void setChannel(ManagedChannel mc) { + log.warn("[PDClient] Invoking a deprecated method [ PDClient::setChannel ]."); + } + + /** + * Return the local PD config. + * + * @return + */ + public PDConfig getClientConfig() { + return this.config; + } + + + /** + * Return the PD pulse client. 
+ * + * @return + */ + public Pulse getPulse() { + return this.pulse; + } + + public Pulse getPulse(long storeId) { + this.pulse.setObserverId(storeId); + return this.pulse; + } + + /** + * Force a reconnection to the PD leader, regardless of whether the current connection is alive or not. + */ + public void forceReconnect() { + getCm().reconnect(); + } + + /** + * Begin watching with the leader address. + * + * @param leader + */ + @Deprecated + public void startWatch(String leader) { + log.warn("[PDClient] Invoking a deprecated method [ PDClient::startWatch ],"); + } + + public String getLeaderIp() { + return getCm().getLeader(); + } + + /** + * Store注册,返回storeID,初次注册会返回新ID + * + * @param store + * @return + */ + public long registerStore(Metapb.Store store) throws PDException { + return this.pdApi.registerStore(store); + } + + /** + * 根据storeId返回Store对象 + * + * @param storeId + * @return + * @throws PDException + */ + public Metapb.Store getStore(long storeId) throws PDException { + return this.pdApi.getStore(storeId); + } + + /** + * 更新Store信息,包括上下线等 + * + * @param store + * @return + */ + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + return this.pdApi.updateStore(store); + } + + /** + * 返回活跃的Store + * + * @param graphName + * @return + */ + public List getActiveStores(String graphName) throws PDException { + return this.pdApi.getActiveStores(graphName); + } + + public List getActiveStores() throws PDException { + return this.pdApi.getActiveStores(); + } + + /** + * 返回活跃的Store + * + * @param graphName + * @return + */ + public List getAllStores(String graphName) throws PDException { + return this.pdApi.getAllStores(graphName); + } + + /** + * Store心跳,定期调用,保持在线状态 + * + * @param stats + * @throws PDException + */ + public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException { + return this.pdApi.storeHeartbeat(stats); + } + + /** + * 查询Key所属分区信息 + * + * @param graphName + * @param key + * @return + * @throws 
PDException + */ + public KVPair getPartition(String graphName, byte[] key) throws PDException { + return this.pdApi.getPartition(graphName, key); + } + + public KVPair getPartition(String graphName, byte[] key, int code) throws PDException { + return this.pdApi.getPartition(graphName, key, code); + } + + /** + * 根据hashcode查询所属分区信息 + * + * @param graphName + * @param hashCode + * @return + * @throws PDException + */ + public KVPair getPartitionByCode(String graphName, long hashCode) + throws PDException { + return this.pdApi.getPartitionByCode(graphName, hashCode); + } + + /** + * 获取Key的哈希值 + */ + public int keyToCode(String graphName, byte[] key) { + return PartitionUtils.calcHashcode(key); + } + + /** + * 根据分区id返回分区信息, RPC请求 + * + * @param graphName + * @param partId + * @return + * @throws PDException + */ + public KVPair getPartitionById(String graphName, int partId) throws PDException { + return this.pdApi.getPartitionById(graphName, partId); + } + + public ShardGroup getShardGroup(int partId) throws PDException { + return this.pdApi.getShardGroup(partId); + } + + public ShardGroup getShardGroupDirect(int partId) throws PDException { + return this.pdApi.getShardGroupDirect(partId); + } + + public void updateShardGroup(ShardGroup shardGroup) throws PDException { + this.pdApi.updateShardGroup(shardGroup); + } + + /** + * 返回startKey和endKey跨越的所有分区信息 + * + * @param graphName + * @param startKey + * @param endKey + * @return + * @throws PDException + */ + public List> scanPartitions(String graphName, byte[] startKey, + byte[] endKey) throws PDException { + return this.pdApi.scanPartitions(graphName, startKey, endKey); + } + + /** + * 根据条件查询分区信息 + * + * @return + * @throws PDException + */ + public List getPartitionsByStore(long storeId) throws PDException { + + return this.pdApi.getPartitionsByStore(storeId); + } + + /** + * 查找指定store上的指定partitionId + * + * @return + * @throws PDException + */ + public List queryPartitions(long storeId, int partitionId) throws 
PDException { + return this.pdApi.queryPartitions(storeId, partitionId); + } + + public List getPartitions(long storeId, String graphName) throws PDException { + + return this.pdApi.getPartitions(storeId, graphName); + + } + + /** + * create a graph, requires the graph wouldn't exist before + * + * @param graph graph + * @return graph that created + * @throws PDException error occurs + */ + public Metapb.Graph createGraph(Metapb.Graph graph) throws PDException { + return this.pdApi.createGraph(graph); + } + + /** + * update graph, update graph name if exists, otherwise create a new graph + * + * @param graph the new graph + * @return graph that updated + * @throws PDException error occurs + */ + public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { + return this.pdApi.setGraph(graph); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + return this.pdApi.getGraph(graphName); + } + + public Metapb.Graph getGraphWithOutException(String graphName) throws + PDException { + return this.pdApi.getGraphWithOutException(graphName); + } + + public Metapb.Graph delGraph(String graphName) throws PDException { + return this.pdApi.delGraph(graphName); + } + + public List updatePartition(List partitions) throws PDException { + return this.pdApi.updatePartition(partitions); + + } + + public Partition delPartition(String graphName, int partitionId) throws PDException { + return this.pdApi.delPartition(graphName, partitionId); + } + + /** + * 删除分区缓存 + */ + public void invalidPartitionCache(String graphName, int partitionId) { + this.pdApi.invalidPartitionCache(graphName, partitionId); + } + + /** + * 删除分区缓存 + */ + public void invalidPartitionCache() { + // 检查是否存在缓存 + cache.removePartitions(); + } + + /** + * 删除分区缓存 + */ + public void invalidStoreCache(long storeId) { + cache.removeStore(storeId); + } + + /** + * Hugegraph server 调用,Leader发生改变,更新缓存 + */ + public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) { + 
this.pdApi.updatePartitionLeader(graphName, partId, leaderStoreId); + } + + /** + * Hugegraph-store调用,更新缓存 + * + * @param partition + */ + public void updatePartitionCache(Partition partition, Shard leader) { + this.pdApi.updatePartitionCache(partition, leader); + } + + public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException { + return this.pdApi.getIdByKey(key, delta); + } + + public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException { + return this.pdApi.resetIdByKey(key); + } + + public Metapb.Member getLeader() throws PDException { + return this.pdApi.getLeader(); + } + + public Pdpb.GetMembersResponse getMembers() throws PDException { + return this.pdApi.getMembers(); + } + + public Metapb.ClusterStats getClusterStats() throws PDException { + return this.pdApi.getClusterStats(DEFAULT_STORE_GROUP_ID); + } + + public Metapb.ClusterStats getClusterStats(long storeId) throws PDException { + return this.pdApi.getClusterStats(storeId); + } + + public Metapb.ClusterStats getClusterStats(int storeGroupId) throws PDException { + return this.pdApi.getClusterStats(storeGroupId); + } + + public void addEventListener(PDEventListener listener) { + this.events.addListener(listener); + } + + public Watcher getWatchClient() { + return this.pdWatch; + } + + /** + * 返回Store状态信息 + */ + public List getStoreStatus(boolean offlineExcluded) throws PDException { + return this.pdApi.getStoreStatus(offlineExcluded); + } + + public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException { + this.pdApi.setGraphSpace(graphSpaceName, storageLimit); + } + + public List getGraphSpace(String graphSpaceName) throws + PDException { + return this.pdApi.getGraphSpace(graphSpaceName); + } + + @Deprecated + public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws + PDException { + this.pdApi.setPDConfig(partitionCount, peerList, shardCount, version); + } + + public void setPDConfig(String peerList, 
int shardCount, long version) throws PDException { + this.pdApi.setPDConfig(0, peerList, shardCount, version); + } + + public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { + this.pdApi.setPDConfig(pdConfig); + } + + public Metapb.PDConfig getPDConfig() throws PDException { + return this.pdApi.getPDConfig(); + } + + public Metapb.PDConfig getPDConfig(long version) throws PDException { + return this.pdApi.getPDConfig(version); + } + + public void changePeerList(String peerList) throws PDException { + this.pdApi.changePeerList(peerList); + } + + /** + * 工作模式 + * Auto:自动分裂,每个Store上分区数达到最大值, 需要指定store group id. store group id 为0, 针对默认分区 + * 建议使用 splitData(ClusterOp.OperationMode mode, int storeGroupId, List params) + * mode = Auto 指定 storeGroupId, params 为空 + * + * @throws PDException + */ + @Deprecated + public void splitData() throws PDException { + this.pdApi.splitData(ClusterOp.OperationMode.Auto, 0, List.of()); + } + + /** + * 工作模式 + * Auto:自动分裂,每个Store上分区数达到最大值, 需要指定store group id + * Expert:专家模式,需要指定splitParams, 限制 SplitDataParam 在同一个store group中 + * + * @param mode + * @param params + * @throws PDException + */ + public void splitData(ClusterOp.OperationMode mode, int storeGroupId, + List params) + throws PDException { + this.pdApi.splitData(mode, storeGroupId, params); + } + + /** + * 针对单个graph的分裂,会扩充partition,造成整体分区数的不一致. + * 建议:针对整个store group做分裂. 
大小图可以根据分组放到不同的分区中 + * + * @param graphName + * @param toCount + * @throws PDException + */ + @Deprecated + public void splitGraphData(String graphName, int toCount) throws PDException { + this.pdApi.splitGraphData(graphName, toCount); + } + + /** + * 自动转移,达到每个Store上分区数量相同, 建议使用 balancePartition(int storeGroupId), 指定 storeGroupId + * + * @throws PDException + */ + @Deprecated + public void balancePartition() throws PDException { + this.pdApi.balancePartition(ClusterOp.OperationMode.Auto, DEFAULT_STORE_GROUP_ID, List.of()); + } + + public void balancePartition(int storeGroupId) throws PDException { + this.pdApi.balancePartition(ClusterOp.OperationMode.Auto, storeGroupId, List.of()); + } + + /** + * 迁移分区 手动模式 + * //工作模式 + * // Auto:自动转移,达到每个Store上分区数量相同 + * // Expert:专家模式,需要指定transferParams + * + * @param params 指定transferParams, expert 模式, 要求 source store / target store在同一个store group + * @throws PDException + */ + public void movePartition(ClusterOp.OperationMode mode, List params) throws + PDException { + this.pdApi.balancePartition(ClusterOp.OperationMode.Expert, DEFAULT_STORE_GROUP_ID, params); + } + + public void reportTask(MetaTask.Task task) throws PDException { + this.pdApi.reportTask(task); + } + + public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException { + return this.pdApi.getPartitionsStats(graph, partId); + } + + /** + * 平衡不同store中leader的数量 + */ + public void balanceLeaders() throws PDException { + this.pdApi.balanceLeaders(); + } + + /** + * 从pd中删除store + */ + public Metapb.Store delStore(long storeId) throws PDException { + return this.pdApi.delStore(storeId); + } + + /** + * 对rocksdb整体进行compaction + * + * @throws PDException + */ + public void dbCompaction() throws PDException { + this.pdApi.dbCompaction(); + } + + /** + * 对rocksdb指定表进行compaction + * + * @param tableName + * @throws PDException + */ + public void dbCompaction(String tableName) throws PDException { + this.pdApi.dbCompaction(tableName); + } + + /** + 
* 分区合并,把当前的分区缩容至toCount个 + * + * @param toCount 缩容到分区的个数 + * @throws PDException + */ + @Deprecated + public void combineCluster(int toCount) throws PDException { + this.pdApi.combineCluster(DEFAULT_STORE_GROUP_ID, toCount); + } + + public void combineCluster(int shardGroupId, int toCount) throws PDException { + this.pdApi.combineCluster(shardGroupId, toCount); + } + + /** + * 将单图缩容到 toCount个, 与分裂类似,要保证同个store group中的分区数量一样。 + * 如果有特殊需求,可以考虑迁移到其他的分组中 + * + * @param graphName graph name + * @param toCount target count + * @throws PDException + */ + @Deprecated + public void combineGraph(String graphName, int toCount) throws PDException { + this.pdApi.combineGraph(graphName, toCount); + } + + public void deleteShardGroup(int groupId) throws PDException { + this.pdApi.deleteShardGroup(groupId); + } + + /** + * 用于 store的 shard list重建 + * + * @param groupId shard group id + * @param shards shard list,delete when shards size is 0 + */ + public void updateShardGroupOp(int groupId, List shards) throws PDException { + this.pdApi.updateShardGroupOp(groupId, shards); + } + + /** + * invoke fireChangeShard command + * + * @param groupId shard group id + * @param shards shard list + */ + public void changeShard(int groupId, List shards) throws PDException { + this.pdApi.changeShard(groupId, shards); + } + + public CacheResponse getClientCache() throws PDException { + return this.pdApi.getClientCache(); + } + + public CachePartitionResponse getPartitionCache(String graph) throws PDException { + return this.pdApi.getPartitionCache(graph); + } + + public void updatePdRaft(String raftConfig) throws PDException { + this.pdApi.updatePdRaft(raftConfig); + } + + public long submitBuildIndexTask(Metapb.BuildIndexParam param) throws PDException { + return this.pdApi.submitBuildIndexTask(param); + } + + public long submitBackupGraphTask(String sourceGraph, String targetGraph) throws PDException { + return this.pdApi.submitBackupGraphTask(sourceGraph, targetGraph); + } + + @Deprecated + 
public Pdpb.TaskQueryResponse queryBuildIndexTaskStatus(long taskId) throws PDException { + return this.queryTaskStatus(taskId); + } + + public Pdpb.TaskQueryResponse queryTaskStatus(long taskId) throws PDException { + return this.pdApi.queryBuildIndexTaskStatus(taskId); + } + + @Deprecated + public Pdpb.TaskQueryResponse retryBuildIndexTask(long taskId) throws PDException { + return retryTask(taskId); + } + + public Pdpb.TaskQueryResponse retryTask(long taskId) throws PDException { + return this.pdApi.retryTask(taskId); + } + + public GraphStatsResponse getGraphStats(String graphName) throws PDException { + return this.pdApi.getGraphStats(graphName); + } + + public Metapb.StoreGroup createStoreGroup(int groupId, String name, int partitionCount) throws + PDException { + return this.pdApi.createStoreGroup(groupId, name, partitionCount); + } + + public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException { + return this.pdApi.getStoreGroup(groupId); + } + + public List getAllStoreGroups() throws PDException { + return this.pdApi.getAllStoreGroups(); + } + + public Metapb.StoreGroup updateStoreGroup(int groupId, String name) throws PDException { + return this.pdApi.updateStoreGroup(groupId, name); + } + + public List getStoresByStoreGroup(int groupId) throws PDException { + return this.pdApi.getStoresByStoreGroup(groupId); + } + + public boolean updateStoreGroupRelation(long storeId, int groupId) throws PDException { + return this.pdApi.updateStoreGroupRelation(storeId, groupId); + } + + public void onLeaderChanged(String leader) { + } + + public void close() { + super.close(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java new file mode 100644 index 0000000000..dfc4766ce2 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java @@ -0,0 +1,111 @@ +package org.apache.hugegraph.pd.client; + +import static 
java.nio.charset.StandardCharsets.UTF_8; + +import java.util.Base64; +import org.apache.commons.lang3.StringUtils; + +import org.apache.hugegraph.pd.client.interceptor.AuthenticationException; + +import lombok.Getter; +import lombok.Setter; + +public final class PDConfig { + //TODO multi-server + private String serverHost = "localhost:9000"; + private long grpcTimeOut = 60000; // grpc调用超时时间 10秒 + private boolean enablePDNotify = false; // 是否接收PD异步通知 + private boolean enableCache = false; + private String authority; + private String userName = ""; + private static final int GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024; + private static final int GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024; + private static int inboundMessageSize = GRPC_DEFAULT_MAX_INBOUND_MESSAGE_SIZE; + private static int outboundMessageSize = GRPC_DEFAULT_MAX_OUTBOUND_MESSAGE_SIZE; + @Getter + @Setter + private boolean autoGetPdServers = false; + + private PDConfig() { + } + + public static PDConfig of() { + return new PDConfig(); + } + + public static PDConfig of(String serverHost) { + PDConfig config = new PDConfig(); + config.serverHost = serverHost; + return config; + } + + public static PDConfig of(String serverHost, long timeOut) { + PDConfig config = new PDConfig(); + config.serverHost = serverHost; + config.grpcTimeOut = timeOut; + return config; + } + + public String getServerHost() { + return serverHost; + } + + public long getGrpcTimeOut() { + return grpcTimeOut; + } + + @Deprecated + public PDConfig setEnablePDNotify(boolean enablePDNotify) { + this.enablePDNotify = enablePDNotify; + // TODO 临时代码,hugegraph修改完后删除 + this.enableCache = enablePDNotify; + return this; + } + + public boolean isEnableCache() { + return enableCache; + } + + public PDConfig setEnableCache(boolean enableCache) { + this.enableCache = enableCache; + return this; + } + + @Override + public String toString() { + return "PDConfig{ serverHost='" + serverHost + '\'' + '}'; + } + + public 
PDConfig setAuthority(String userName, String pwd) { + this.userName = userName; + String auth = userName + ':' + pwd; + this.authority = new String(Base64.getEncoder().encode(auth.getBytes(UTF_8))); + return this; + } + + public String getUserName() { + return userName; + } + + public String getAuthority() { + if (StringUtils.isEmpty(this.authority)){ + throw new AuthenticationException("invalid basic authentication info"); + } + return authority; + } + public static int getInboundMessageSize() { + return inboundMessageSize; + } + + public static void setInboundMessageSize(int inboundMessageSize) { + PDConfig.inboundMessageSize = inboundMessageSize; + } + + public static int getOutboundMessageSize() { + return outboundMessageSize; + } + + public static void setOutboundMessageSize(int outboundMessageSize) { + PDConfig.outboundMessageSize = outboundMessageSize; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java new file mode 100644 index 0000000000..e112375964 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PulseClient.java @@ -0,0 +1,134 @@ +package org.apache.hugegraph.pd.client; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; + +import org.apache.hugegraph.pd.client.impl.StreamDelegator; +import org.apache.hugegraph.pd.client.impl.StreamDelegatorSender; +import org.apache.hugegraph.pd.client.support.PDExecutors; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import org.apache.hugegraph.pd.pulse.DefaultPulseNotifier; +import 
org.apache.hugegraph.pd.pulse.Pulse; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.pulse.PulseNotifier; +import org.apache.hugegraph.pd.pulse.PulseResponseNotice; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import com.google.protobuf.GeneratedMessageV3; + +import lombok.Getter; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2023/11/20 + * @version 3.0.1 removed the `noticeParserMap` on 2024/01/08 + */ +@Slf4j +public class PulseClient extends BaseClient implements Pulse { + + private final byte[] lock = new byte[0]; + private final Map> listeners = new ConcurrentHashMap<>(); + private final Map> notifiers = new ConcurrentHashMap<>(); + private final Map> delegators = + new ConcurrentHashMap<>(); + private final ExecutorService threadPool = PDExecutors.newQueuingPool("pulse-ack", 1); + + @Getter + @Setter + private long observerId; + + public PulseClient(PDConfig config) { + super(config, HgPdPulseGrpc::newStub, HgPdPulseGrpc::newBlockingStub); + } + + public PulseNotifier connect(PulseListener listener) { + return connect(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT, listener); + } + + public PulseNotifier connect(PulseType pulseType, + PulseListener listener) { + HgAssert.isArgumentNotNull(listener, "listener"); + this.listeners.put(pulseType, listener); + DefaultPulseNotifier notifier = this.notifiers.get(pulseType); + if (notifier == null) { + synchronized (this.lock) { + notifier = this.notifiers.computeIfAbsent(pulseType, + k -> new DefaultPulseNotifier(pulseType, newStreaming(pulseType), this.observerId) + ); + notifier.start(); + } + } + return notifier; + } + + public boolean resetStub(String host, PulseNotifier notifier) { + return true; + } + + private StreamDelegatorSender newStreaming(PulseType pulseType) { + StreamDelegator delegator = delegators.computeIfAbsent(pulseType, + k -> new StreamDelegator(pulseType.name(), + getLeaderInvoker(), + 
HgPdPulseGrpc.getPulseMethod())); + return delegator.link(response -> handleOnNext(pulseType, (PulseResponse) response)); + } + + public PulseListener getListener(PulseType pulseType) { + return this.listeners.get(pulseType); + } + + private PulseServerNotice toPulseResponseNotice(PulseResponse pulseResponse) { + return new PulseResponseNotice(pulseResponse.getNoticeId(), + e -> ackNotice( + pulseResponse.getPulseType(), + pulseResponse.getNoticeId(), + pulseResponse.getObserverId()), + pulseResponse); + } + + private void handleOnNext(PulseType pulseType, PulseResponse response) { + PulseServerNotice notice = toPulseResponseNotice(response); + PulseListener listener = getListener(pulseType); + if (listener != null) { + try { + listener.onNext(response); + listener.onNotice(notice); + } catch (Throwable e) { + log.error("Listener failed to handle notice: \n{}, caused by: ", response, e); + } + } + } + + private void ackNotice(PulseType pulseType, long noticeId, long observerId) { + DefaultPulseNotifier sender = this.notifiers.get(pulseType); + if (sender == null) { + log.error("Sender is null, pulse type: {}", pulseType); + throw new IllegalStateException("Sender is null, pulse type: " + pulseType); + } + sendAck(sender, noticeId, observerId); + } + + private void sendAck(DefaultPulseNotifier sender, long noticeId, long observerId) { + this.threadPool.execute(() -> { + log.info("Sending ack, notice id: {}, observer id: {}, ts: {}", noticeId, observerId, + System.currentTimeMillis()); + sender.ack(noticeId, observerId); + }); + } + + public void onLeaderChanged(String leader) { + this.delegators.entrySet().parallelStream().forEach(e -> { + try { + e.getValue().reconnect(); + } catch (Exception ex) { + log.warn("reconnect to leader with error:", ex); + } + }); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java new file mode 100644 index 
0000000000..ae1570725e --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java @@ -0,0 +1,2134 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.client; + +import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_PD_LEADER_CHANGE; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionByCodeRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; + +import com.google.protobuf.ByteString; + +import io.grpc.ManagedChannel; +import io.grpc.MethodDescriptor; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.AbstractBlockingStub; +import lombok.extern.slf4j.Slf4j; + +/** + * PD client implementation class + */ +@Slf4j +public class PDClient { + + private final PDConfig config; + private final Pdpb.RequestHeader header; + private final ClientCache cache; + private final StubProxy stubProxy; + private final List eventListeners; + private PDWatch.Watcher partitionWatcher; + private PDWatch.Watcher storeWatcher; + private PDWatch.Watcher graphWatcher; + private PDWatch.Watcher shardGroupWatcher; + private PDWatch pdWatch; + + private PDClient(PDConfig config) 
{ + this.config = config; + this.header = Pdpb.RequestHeader.getDefaultInstance(); + this.stubProxy = new StubProxy(config.getServerHost().split(",")); + this.eventListeners = new CopyOnWriteArrayList<>(); + this.cache = new ClientCache(this); + } + + /** + * Create a PD client object and initialize the stub + * + * @param config + * @return + */ + public static PDClient create(PDConfig config) { + return new PDClient(config); + } + + private synchronized void newBlockingStub() throws PDException { + if (stubProxy.get() != null) { + return; + } + + String host = newLeaderStub(); + if (host.isEmpty()) { + throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE, + "PD unreachable, pd.peers=" + config.getServerHost()); + } + + log.info("PDClient enable cache, init PDWatch object"); + connectPdWatch(host); + } + + public void connectPdWatch(String leader) { + + if (pdWatch != null && Objects.equals(pdWatch.getCurrentHost(), leader) && + pdWatch.checkChannel()) { + return; + } + + log.info("PDWatch client connect host:{}", leader); + pdWatch = new PDWatchImpl(leader); + + partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() { + @Override + public void onNext(PartitionEvent response) { + // log.info("PDClient receive partition event {}-{} {}", + // response.getGraph(), response.getPartitionId(), response.getChangeType()); + invalidPartitionCache(response.getGraph(), response.getPartitionId()); + + if (response.getChangeType() == PartitionEvent.ChangeType.DEL) { + cache.removeAll(response.getGraph()); + } + + eventListeners.forEach(listener -> { + listener.onPartitionChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.error("watchPartition exception {}", throwable.getMessage()); + closeStub(false); + } + }); + + storeWatcher = pdWatch.watchNode(new PDWatch.Listener<>() { + @Override + public void onNext(NodeEvent response) { + log.info("PDClient receive store event {} {}", + response.getEventType(), 
Long.toHexString(response.getNodeId())); + + if (response.getEventType() == NODE_PD_LEADER_CHANGE) { + // pd raft change + var leaderIp = response.getGraph(); + log.info("watchNode: pd leader changed to {}, current watch:{}", + leaderIp, pdWatch.getCurrentHost()); + closeStub(!Objects.equals(pdWatch.getCurrentHost(), leaderIp)); + connectPdWatch(leaderIp); + } + + invalidStoreCache(response.getNodeId()); + eventListeners.forEach(listener -> { + listener.onStoreChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.error("watchNode exception {}", throwable.getMessage()); + closeStub(false); + } + + }); + + graphWatcher = pdWatch.watchGraph(new PDWatch.Listener<>() { + @Override + public void onNext(WatchResponse response) { + eventListeners.forEach(listener -> { + listener.onGraphChanged(response); + }); + } + + @Override + public void onError(Throwable throwable) { + log.warn("graphWatcher exception {}", throwable.getMessage()); + } + }); + + shardGroupWatcher = pdWatch.watchShardGroup(new PDWatch.Listener<>() { + @Override + public void onNext(WatchResponse response) { + var shardResponse = response.getShardGroupResponse(); + // log.info("PDClient receive shard group event: raft {}-{}", shardResponse + // .getShardGroupId(), + // shardResponse.getType()); + if (config.isEnableCache()) { + switch (shardResponse.getType()) { + case WATCH_CHANGE_TYPE_DEL: + cache.deleteShardGroup(shardResponse.getShardGroupId()); + break; + case WATCH_CHANGE_TYPE_ALTER: + cache.updateShardGroup( + response.getShardGroupResponse().getShardGroup()); + break; + default: + break; + } + } + eventListeners.forEach(listener -> listener.onShardGroupChanged(response)); + } + + @Override + public void onError(Throwable throwable) { + log.warn("shardGroupWatcher exception {}", throwable.getMessage()); + } + }); + + } + + private synchronized void closeStub(boolean closeWatcher) { + stubProxy.set(null); + cache.reset(); + + if (closeWatcher) { + if 
(partitionWatcher != null) { + partitionWatcher.close(); + partitionWatcher = null; + } + if (storeWatcher != null) { + storeWatcher.close(); + storeWatcher = null; + } + if (graphWatcher != null) { + graphWatcher.close(); + graphWatcher = null; + } + + if (shardGroupWatcher != null) { + shardGroupWatcher.close(); + shardGroupWatcher = null; + } + + pdWatch = null; + } + } + + private PDGrpc.PDBlockingStub getStub() throws PDException { + if (stubProxy.get() == null) { + newBlockingStub(); + } + return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS); + } + + private PDGrpc.PDBlockingStub newStub() throws PDException { + if (stubProxy.get() == null) { + newBlockingStub(); + } + return PDGrpc.newBlockingStub(stubProxy.get().getChannel()) + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); + } + + private String newLeaderStub() { + String leaderHost = ""; + for (int i = 0; i < stubProxy.getHostCount(); i++) { + String host = stubProxy.nextHost(); + ManagedChannel channel = Channels.getChannel(host); + + PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel) + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS); + try { + var leaderIp = getLeaderIp(stub); + if (!leaderIp.equalsIgnoreCase(host)) { + leaderHost = leaderIp; + stubProxy.set(PDGrpc.newBlockingStub(channel) + .withDeadlineAfter(config.getGrpcTimeOut(), + TimeUnit.MILLISECONDS)); + } else { + stubProxy.set(stub); + leaderHost = host; + } + stubProxy.setLeader(leaderIp); + + log.info("PDClient connect to host = {} success", leaderHost); + break; + } catch (Exception e) { + log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(), + e.getCause() != null ? 
e.getCause().getMessage() : ""); + } + } + return leaderHost; + } + + public String getLeaderIp() { + + return getLeaderIp(stubProxy.get()); + } + + private String getLeaderIp(PDGrpc.PDBlockingStub stub) { + if (stub == null) { + try { + getStub(); + return stubProxy.getLeader(); + } catch (PDException e) { + throw new RuntimeException(e); + } + } + + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + .build(); + Metapb.Member leader = stub.getMembers(request).getLeader(); + return leader.getGrpcUrl(); + } + + /** + * Store registration, the store ID will be returned, and the initial registration will + * return a new ID +======== +package org.apache.hugegraph.pd.client.impl; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hugegraph.pd.client.ClientCache; +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.client.rpc.ConnectionManager; +import org.apache.hugegraph.pd.client.rpc.Invoker; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.StoreGroup; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.RequestHeader; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import com.google.protobuf.ByteString; + +import io.grpc.MethodDescriptor; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2023/12/8 + */ +@Slf4j +public class PDApi { + private final PDConfig config; + private final ConnectionManager cm; + private final 
ClientCache cache; + private final RequestHeader header = RequestHeader.getDefaultInstance(); + private final Invoker invoker; + private PDClient client; + + public PDApi(PDClient client, ClientCache cache) { + this.client = client; + this.config = client.getConfig(); + this.cm = client.getCm(); + this.cache = cache; + this.invoker = client.getLeaderInvoker(); + } + + private RespT blockingUnaryCall( + MethodDescriptor method, ReqT req) throws PDException { + return invoker.blockingCall(method, req); + } + + private void handleResponseError(ResponseHeader header) throws PDException { + var errorType = header.getError().getType(); + if (header.hasError() && errorType != ErrorType.OK) { + throw new PDException(header.getError().getTypeValue(), + String.format("PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } + } + + /** + * Store注册,返回storeID,初次注册会返回新ID +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @param store + * @return + */ + public long registerStore(Metapb.Store store) throws PDException { + Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .setStore(store).build(); + +======== + .setHeader(header) + .setStore(store).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + Pdpb.RegisterStoreResponse response = + blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreId(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + /** + * Returns the Store object based on the store ID + * + * @param storeId + * @return + * @throws PDException + */ + public Metapb.Store 
getStore(long storeId) throws PDException { + Metapb.Store store = cache.getStoreById(storeId); + if (store == null) { + Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder() + .setHeader(header) + .setStoreId(storeId).build(); + Pdpb.GetStoreResponse response = getStub().getStore(request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) { + cache.addStore(storeId, store); + } + } + return store; + } + + /** + * Update the store information, including online and offline + * + * @param store + * @return + */ + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder() + .setHeader(header) + .setStore(store).build(); + + Pdpb.SetStoreResponse response = getStub().setStore(request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) { + cache.addStore(store.getId(), store); + } + return store; + } + + /** + * Return to the active store + * + * @param graphName + * @return + */ + public List getActiveStores(String graphName) throws PDException { + List stores = new ArrayList<>(); + KVPair ptShard = this.getPartitionByCode(graphName, 0); + while (ptShard != null) { + stores.add(this.getStore(ptShard.getValue().getStoreId())); + if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) { + ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey()); + } else { + ptShard = null; + } + } + return stores; + } + + public List getActiveStores() throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName("") + .setExcludeOfflineStores(true) + .build(); + Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * Return to the active store + * + * @param graphName + * @return + */ 
+ public List getAllStores(String graphName) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setExcludeOfflineStores(false) + .build(); + Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * Store heartbeat, call regularly, stay online + * + * @param stats + * @throws PDException + */ + public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException { + Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder() + .setHeader(header) + .setStats(stats).build(); + Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request); + handleResponseError(response.getHeader()); + return response.getClusterStats(); + } + + private KVPair getKvPair(String graphName, byte[] key, + KVPair partShard) throws + PDException { + if (partShard == null) { + GetPartitionRequest request = GetPartitionRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setKey(ByteString.copyFrom(key)) + .build(); + GetPartitionResponse response = +======== + public KVPair getKvPair(String graphName, byte[] key, KVPair partShard) throws PDException { + if (partShard == null) { + Pdpb.GetPartitionRequest request = Pdpb.GetPartitionRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setKey(ByteString.copyFrom(key)) + .build(); + Pdpb.GetPartitionResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); + } +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + return partShard; + } + + /** + * Query the partition to which the key belongs + * + * @param graphName + * @param key + * @return + * @throws PDException + */ + public KVPair getPartition(String graphName, byte[] key) throws + PDException { + + KVPair partShard = + this.getPartitionByCode(graphName, PartitionUtils.calcHashcode(key)); + partShard = getKvPair(graphName, key, partShard); + return partShard; + } + + public KVPair getPartition(String graphName, byte[] key, + int code) throws + PDException { + KVPair partShard = + cache.getPartitionByCode(graphName, code); + partShard = getKvPair(graphName, key, partShard); +======== + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + return partShard; + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * Query the partition information based on the hashcode +======== + * 根据hashcode查询所属分区信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @param graphName + * @param hashCode + * @return + * @throws PDException + */ + public KVPair getPartitionByCode(String graphName, + long hashCode) + throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + KVPair partShard = + cache.getPartitionByCode(graphName, hashCode); + if (partShard == null) { + GetPartitionByCodeRequest request = GetPartitionByCodeRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setCode(hashCode).build(); + GetPartitionResponse response = +======== + // 先查cache,cache没有命中,在调用PD + KVPair partShard = cache.getPartitionByCode(graphName, hashCode); + if (partShard == null) { + Pdpb.GetPartitionByCodeRequest request = Pdpb.GetPartitionByCodeRequest.newBuilder() + .setHeader(header) + 
.setGraphName(graphName) + .setCode(hashCode).build(); + Pdpb.GetPartitionResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); + cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); + } + + if (partShard.getValue() == null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + ShardGroup shardGroup = getShardGroup(partShard.getKey().getId()); +======== + Metapb.ShardGroup shardGroup = getShardGroup(partShard.getKey().getId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + if (shardGroup != null) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + partShard.setValue(shard); + } + } + } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + log.error("getPartitionByCode: get shard group failed, {}", + partShard.getKey().getId()); +======== + log.error("getPartitionByCode: get shard group failed, {}", partShard.getKey().getId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + } + } + return partShard; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + /** + * Obtain the hash value of the key + */ + public int keyToCode(String graphName, byte[] key) { + return PartitionUtils.calcHashcode(key); + } + + /** + * Returns partition information based on the partition ID and RPC request +======== + /** + * 根据分区id返回分区信息, RPC请求 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @param graphName + * @param partId + * @return + * @throws PDException + */ + public KVPair getPartitionById(String graphName, + int partId) throws PDException { + KVPair partShard = + cache.getPartitionById(graphName, partId); + if (partShard == null) { + Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .setGraphName( + graphName) + .setPartitionId( + partId) + .build(); + GetPartitionResponse response = +======== + .setHeader(header) + .setGraphName(graphName) + .setPartitionId(partId) + .build(); + Pdpb.GetPartitionResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request); + handleResponseError(response.getHeader()); + partShard = new KVPair<>(response.getPartition(), response.getLeader()); + if (config.isEnableCache()) { + cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); + cache.updateShardGroup(getShardGroup(partShard.getKey().getId())); + } + } + if (partShard.getValue() == null) { + var shardGroup = getShardGroup(partShard.getKey().getId()); + if (shardGroup != null) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + partShard.setValue(shard); + } + } + } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + log.error("getPartitionById: get shard group failed, {}", + partShard.getKey().getId()); +======== + log.error("getPartitionById: get shard group failed, {}", partShard.getKey().getId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + } + } + return partShard; + } + +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public ShardGroup getShardGroup(int partId) throws PDException { + ShardGroup group = cache.getShardGroup(partId); + if (group == null) { + Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(partId) + .build(); + Pdpb.GetShardGroupResponse response = + blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request); + handleResponseError(response.getHeader()); + group = response.getShardGroup(); + if (config.isEnableCache()) { + cache.updateShardGroup(group); + } + } + return group; +======== + + public Metapb.ShardGroup getShardGroupDirect(int partId) throws PDException { + Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(partId) + .build(); + Pdpb.GetShardGroupResponse response = blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getShardGroup(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + } + + public void updateShardGroup(ShardGroup shardGroup) throws PDException { + Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .setShardGroup( + shardGroup) + .build(); +======== + .setHeader(header) + .setShardGroup(shardGroup) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + Pdpb.UpdateShardGroupResponse response = + blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request); + handleResponseError(response.getHeader()); + + if (config.isEnableCache()) { + cache.updateShardGroup(shardGroup); + } + } + + /** +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * Returns information about all partitions spanned by the start and end keys + * + * @param graphName + * @param startKey + * @param endKey + * @return + * @throws PDException + */ + public List> scanPartitions(String graphName, + byte[] startKey, + byte[] endKey) throws + PDException { + List> partitions = new ArrayList<>(); + KVPair startPartShard = getPartition(graphName, startKey); + KVPair endPartShard = getPartition(graphName, endKey); + if (startPartShard == null || endPartShard == null) { + return null; + } + + partitions.add(startPartShard); + while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey() + && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) { + startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey()); + partitions.add(startPartShard); + } + return partitions; + } + + /** + * Query partition information based on conditions +======== + * 根据条件查询分区信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @return + * @throws PDException + */ + public List getPartitionsByStore(long storeId) throws PDException { + + Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() + .setStoreId(storeId) + .build(); + Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() + .setQuery(query).build(); + Pdpb.QueryPartitionsResponse response = + blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); + + handleResponseError(response.getHeader()); + return response.getPartitionsList(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public List queryPartitions(long storeId, int partitionId) throws + PDException { +======== + /** + * 查找指定store上的指定partitionId + * + * @return + * @throws PDException + */ + public List queryPartitions(long storeId, int 
partitionId) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + + Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() + .setStoreId(storeId) + .setPartitionId(partitionId) + .build(); + Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() + .setQuery(query).build(); + Pdpb.QueryPartitionsResponse response = + blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); + + handleResponseError(response.getHeader()); + return response.getPartitionsList(); + } + + public List getPartitions(long storeId, String graphName) throws PDException { + + Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() + .setStoreId(storeId) + .setGraphName(graphName).build(); + Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() + .setQuery(query).build(); + Pdpb.QueryPartitionsResponse response = + blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); + + handleResponseError(response.getHeader()); + return response.getPartitionsList(); + + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { + Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder() + .setGraph(graph) + .build(); + Pdpb.SetGraphResponse response = +======== + public Metapb.Graph createGraph(Metapb.Graph graph) throws PDException { + Pdpb.CreateGraphRequest request = Pdpb.CreateGraphRequest.newBuilder() + .setGraph(graph) + .build(); + Pdpb.CreateGraphResponse response = blockingUnaryCall(PDGrpc.getCreateGraphMethod(), request); + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { + Pdpb.CreateGraphRequest request = Pdpb.CreateGraphRequest.newBuilder() + .setGraph(graph) + .build(); + Pdpb.CreateGraphResponse response = 
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + blockingUnaryCall(PDGrpc.getSetGraphMethod(), request); + + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + GetGraphRequest request = GetGraphRequest.newBuilder() + .setGraphName(graphName) + .build(); + Pdpb.GetGraphResponse response = + blockingUnaryCall(PDGrpc.getGetGraphMethod(), request); + + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public Metapb.Graph getGraphWithOutException(String graphName) throws +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + PDException { + GetGraphRequest request = GetGraphRequest.newBuilder() + .setGraphName( + graphName) + .build(); +======== + PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() + .setGraphName( + graphName) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + Pdpb.GetGraphResponse response = blockingUnaryCall( + PDGrpc.getGetGraphMethod(), request); + return response.getGraph(); + } + + public Metapb.Graph delGraph(String graphName) throws PDException { + Pdpb.DelGraphRequest request = Pdpb.DelGraphRequest.newBuilder() + .setGraphName(graphName) + .build(); + Pdpb.DelGraphResponse response = + blockingUnaryCall(PDGrpc.getDelGraphMethod(), request); + + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public List updatePartition(List partitions) throws + PDException { + + Pdpb.UpdatePartitionRequest request = Pdpb.UpdatePartitionRequest.newBuilder() + .addAllPartition( + partitions) + .build(); + Pdpb.UpdatePartitionResponse response = + blockingUnaryCall(PDGrpc.getUpdatePartitionMethod(), request); + handleResponseError(response.getHeader()); + invalidPartitionCache(); + + 
return response.getPartitionList(); + } + + public Metapb.Partition delPartition(String graphName, int partitionId) throws PDException { + + Pdpb.DelPartitionRequest request = Pdpb.DelPartitionRequest.newBuilder() + .setGraphName(graphName) + .setPartitionId(partitionId) + .build(); + Pdpb.DelPartitionResponse response = + blockingUnaryCall(PDGrpc.getDelPartitionMethod(), request); + + handleResponseError(response.getHeader()); + invalidPartitionCache(graphName, partitionId); + return response.getPartition(); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + + /** + * Delete the partitioned cache + */ + public void invalidPartitionCache(String graphName, int partitionId) { + if (null != cache.getPartitionById(graphName, partitionId)) { + cache.removePartition(graphName, partitionId); + } + } + + /** + * Delete the partitioned cache + */ + public void invalidPartitionCache() { + cache.removePartitions(); + } + + /** + * Delete the partitioned cache + */ + public void invalidStoreCache(long storeId) { + cache.removeStore(storeId); + } + + /** + * Update the cache + */ + public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) { + KVPair partShard = null; + try { + partShard = this.getPartitionById(graphName, partId); + + if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) { + var shardGroup = this.getShardGroup(partId); + Metapb.Shard shard = null; + List shards = new ArrayList<>(); + + for (Metapb.Shard s : shardGroup.getShardsList()) { + if (s.getStoreId() == leaderStoreId) { + shard = s; + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Leader).build()); + } else { + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Follower).build()); + } + } + + if (config.isEnableCache()) { + if (shard == null) { + cache.removePartition(graphName, partId); + } + } + } + } catch 
(PDException e) { + log.error("getPartitionException: {}", e.getMessage()); + } + } + + /** + * Update the cache + * + * @param partition + */ + public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) { + if (config.isEnableCache()) { + cache.update(partition.getGraphName(), partition.getId(), partition); + cache.updateLeader(partition.getId(), leader); + } + } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + + public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException { + Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder() + .setHeader(header) + .setKey(key) + .setDelta(delta) + .build(); + Pdpb.GetIdResponse response = blockingUnaryCall(PDGrpc.getGetIdMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException { + Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .setKey(key) + .build(); +======== + .setHeader(header) + .setKey(key) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Metapb.Member getLeader() throws PDException { + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); + handleResponseError(response.getHeader()); + return response.getLeader(); + } + + public Pdpb.GetMembersResponse getMembers() throws PDException { + Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() + .setHeader(header) + 
.build(); + Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Metapb.ClusterStats getClusterStats(long storeId) throws PDException { + Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .build(); + Pdpb.GetClusterStatsResponse response = + blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); +======== + .setHeader(header) + .setStoreId(storeId) + .build(); + Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + return response.getCluster(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + private > RespT + blockingUnaryCall(MethodDescriptor method, ReqT req) throws PDException { + return blockingUnaryCall(method, req, 1); + } + + private > RespT + blockingUnaryCall(MethodDescriptor method, ReqT req, int retry) throws + PDException { + io.grpc.stub.AbstractBlockingStub stub = (AbstractBlockingStub) getStub(); + try { + RespT resp = io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method, + stub.getCallOptions(), req); + return resp; + } catch (Exception e) { + log.error(method.getFullMethodName() + " exception, {}", e.getMessage()); + if (e instanceof StatusRuntimeException) { + if (retry < stubProxy.getHostCount()) { + closeStub(true); + return blockingUnaryCall(method, req, ++retry); + } +======== + public Metapb.ClusterStats getClusterStats(int storeGroupId) throws PDException { + Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder() + .setHeader(header) + .setStoreGroup(storeGroupId) 
+ .build(); + Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); + handleResponseError(response.getHeader()); + return response.getCluster(); + } + + public void changePeerList(String peerList) throws PDException { + ClusterOp.ChangePeerListRequest request = ClusterOp.ChangePeerListRequest.newBuilder() + .setPeerList(peerList) + .setHeader(header).build(); + ClusterOp.ChangePeerListResponse response = + blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request); + handleResponseError(response.getHeader()); + } + + public void reportTask(MetaTask.Task task) throws PDException { + ClusterOp.ReportTaskRequest request = ClusterOp.ReportTaskRequest.newBuilder() + .setHeader(header) + .setTask(task).build(); + ClusterOp.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request); + handleResponseError(response.getHeader()); + } + + public void deleteShardGroup(int groupId) throws PDException { + Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest + .newBuilder() + .setHeader(header) + .setGroupId(groupId) + .build(); + Pdpb.DeleteShardGroupResponse response = + blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request); + + handleResponseError(response.getHeader()); + } + + public Metapb.ShardGroup getShardGroup(int partId) throws PDException { + Metapb.ShardGroup group = cache.getShardGroup(partId); + if (group == null) { + group = getShardGroupDirect(partId); + if (config.isEnableCache()) { + cache.updateShardGroup(group); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + } + } + return group; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + private void handleResponseError(Pdpb.ResponseHeader header) throws + PDException { + var errorType = header.getError().getType(); + if (header.hasError() && errorType != Pdpb.ErrorType.OK) { + + throw new 
PDException(header.getError().getTypeValue(), + String.format( + "PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); +======== + public void invalidPartitionCache() { + // 检查是否存在缓存 + cache.removePartitions(); + } + + /** + * 删除分区缓存 + */ + public void invalidPartitionCache(String graphName, int partitionId) { + // 检查是否存在缓存 + if (null != cache.getPartitionById(graphName, partitionId)) { + cache.removePartition(graphName, partitionId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + } + + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public void addEventListener(PDEventListener listener) { + eventListeners.add(listener); + } + + public PDWatch getWatchClient() { + return new PDWatchImpl(stubProxy.getHost()); + } + + /** + * Returns the store status information + */ + public List getStoreStatus(boolean offlineExcluded) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setExcludeOfflineStores( + offlineExcluded) + .build(); + Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request); +======== + /** + * 根据storeId返回Store对象 + * + * @param storeId + * @return + * @throws PDException + */ + public Metapb.Store getStore(long storeId) throws PDException { + Metapb.Store store = cache.getStoreById(storeId); + if (store == null) { + Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder() + .setHeader(header) + .setStoreId(storeId).build(); + // Pdpb.GetStoreResponse response = getStub().getStore(request); + Pdpb.GetStoreResponse response = blockingUnaryCall(PDGrpc.getGetStoreMethod(), request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) { + cache.addStore(storeId, store); + } + } + return store; + } + + /** + * 更新Store信息,包括上下线等 + * + 
* @param store + * @return + */ + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder() + .setHeader(header) + .setStore(store).build(); + + // Pdpb.SetStoreResponse response = getStub().setStore(request); + Pdpb.SetStoreResponse response = blockingUnaryCall(PDGrpc.getSetStoreMethod(), request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) { + cache.addStore(store.getId(), store); + } + return store; + } + + public List getActiveStores() throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName("") + .setExcludeOfflineStores(true) + .build(); + // Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetAllStoresMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * 返回活跃的Store + * + * @param graphName + * @return + */ + public List getActiveStores(String graphName) throws PDException { + Set stores = new HashSet<>(); + KVPair ptShard = this.getPartitionByCode(graphName, 0); + while (ptShard != null) { + stores.add(this.getStore(ptShard.getValue().getStoreId())); + if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) { + ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey()); + } else { + ptShard = null; + } + } + return new ArrayList<>(stores); + } + + /** + * 返回活跃的Store + * + * @param graphName + * @return + */ + public List getAllStores(String graphName) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setExcludeOfflineStores(false) + .build(); + // Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + Pdpb.GetAllStoresResponse response = 
blockingUnaryCall(PDGrpc.getGetAllStoresMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * Store心跳,定期调用,保持在线状态 + * + * @param stats + * @throws PDException + */ + public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException { + Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder() + .setHeader(header) + .setStats(stats).build(); + // Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request); + Pdpb.StoreHeartbeatResponse response = blockingUnaryCall(PDGrpc.getStoreHeartbeatMethod(), request); + handleResponseError(response.getHeader()); + return response.getClusterStats(); + } + + /** + * 返回Store状态信息 + */ + public List getStoreStatus(boolean offlineExcluded) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setExcludeOfflineStores(offlineExcluded) + .build(); + // Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request); + Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetStoreStatusMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + List stores = response.getStoresList(); + return stores; + } + + public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException { + Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder().setName(graphSpaceName) +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setStorageLimit(storageLimit) + .setTimestamp(System.currentTimeMillis()) + .build(); +======== + .setStorageLimit(storageLimit) + .setTimestamp(System.currentTimeMillis()).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + Pdpb.SetGraphSpaceRequest request = 
Pdpb.SetGraphSpaceRequest.newBuilder() + .setHeader(header) + .setGraphSpace(graphSpace) + .build(); + // Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request); + Pdpb.SetGraphSpaceResponse response = blockingUnaryCall(PDGrpc.getSetGraphSpaceMethod(), request); + handleResponseError(response.getHeader()); + } + + public List getGraphSpace(String graphSpaceName) throws + PDException { + Pdpb.GetGraphSpaceRequest.Builder builder = Pdpb.GetGraphSpaceRequest.newBuilder(); + Pdpb.GetGraphSpaceRequest request; + builder.setHeader(header); + if (graphSpaceName != null && graphSpaceName.length() > 0) { + builder.setGraphSpaceName(graphSpaceName); + } + request = builder.build(); + // Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request); + Pdpb.GetGraphSpaceResponse response = blockingUnaryCall(PDGrpc.getGetGraphSpaceMethod(), request); + List graphSpaceList = response.getGraphSpaceList(); + handleResponseError(response.getHeader()); + return graphSpaceList; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public void setPDConfig(int partitionCount, String peerList, int shardCount, + long version) throws PDException { + Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().setPartitionCount(partitionCount) + .setPeersList(peerList).setShardCount(shardCount) + .setVersion(version) + .setTimestamp(System.currentTimeMillis()) + .build(); + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); +======== + public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws + PDException { + Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder() + .setPeersList(peerList) + .setShardCount(shardCount) + .setVersion(version) + .setTimestamp(System.currentTimeMillis()) + .build(); + Pdpb.SetPDConfigRequest request = 
Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + // Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + Pdpb.SetPDConfigResponse response = blockingUnaryCall(PDGrpc.getSetPDConfigMethod(), request); + handleResponseError(response.getHeader()); + } + + public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + // Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + Pdpb.SetPDConfigResponse response = blockingUnaryCall(PDGrpc.getSetPDConfigMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + } + + public Metapb.PDConfig getPDConfig() throws PDException { + Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .build(); + Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); +======== + .setHeader(header) + .build(); + // Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + Pdpb.GetPDConfigResponse response = blockingUnaryCall(PDGrpc.getGetPDConfigMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + return response.getPdConfig(); + } + + public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + handleResponseError(response.getHeader()); + } + + public Metapb.PDConfig getPDConfig(long version) throws PDException { 
+ Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder().setHeader( + header).setVersion(version).build(); + // Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + Pdpb.GetPDConfigResponse response = blockingUnaryCall(PDGrpc.getGetPDConfigMethod(), request); + handleResponseError(response.getHeader()); + return response.getPdConfig(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public void changePeerList(String peerList) throws PDException { + Pdpb.ChangePeerListRequest request = Pdpb.ChangePeerListRequest.newBuilder() + .setPeerList(peerList) + .setHeader(header).build(); + Pdpb.getChangePeerListResponse response = + blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request); + handleResponseError(response.getHeader()); + } + + /** + * Working mode + * Auto: If the number of partitions on each store reaches the maximum value, you need to + * specify the store group id. The store group id is 0, which is the default partition + * splitData(ClusterOp.OperationMode mode, int storeGroupId, List + * params) + * mode = Auto storeGroupId, params + * + * @throws PDException + */ + public void splitData() throws PDException { + Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder() + .setHeader(header) + .setMode(Pdpb.OperationMode.Auto) + .build(); + Pdpb.SplitDataResponse response = getStub().splitData(request); + handleResponseError(response.getHeader()); + } + + /** + * Working mode + * Auto: If the number of partitions on each store reaches the maximum value, you need to + * specify the store group id. 
The store group id is 0, which is the default partition + * Expert: Expert Mode, Specifier is required splitParams, limit SplitDataParam in the same + * store group + * + * @param mode + * @param params + * @throws PDException + */ + public void splitData(Pdpb.OperationMode mode, List params) throws + PDException { + Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .addAllParam(params).build(); + Pdpb.SplitDataResponse response = getStub().splitData(request); +======== + public void splitData(ClusterOp.OperationMode mode, int storeGroupId, List params) + throws PDException { + ClusterOp.SplitDataRequest request = ClusterOp.SplitDataRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .setStoreGroupId(storeGroupId) + .addAllParam(params).build(); + ; + ClusterOp.SplitDataResponse response = blockingUnaryCall(PDGrpc.getSplitDataMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + } + + + public void splitGraphData(String graphName, int toCount) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.SplitGraphDataRequest request = Pdpb.SplitGraphDataRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); + Pdpb.SplitDataResponse response = getStub().splitGraphData(request); +======== + ClusterOp.SplitGraphDataRequest request = ClusterOp.SplitGraphDataRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); + // Pdpb.SplitDataResponse response = getStub().splitGraphData(request); + ClusterOp.SplitDataResponse response = blockingUnaryCall(PDGrpc.getSplitGraphDataMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + 
handleResponseError(response.getHeader()); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * To automatically transfer to the same number of partitions on each Store, it is + * recommended to use balancePartition(int storeGroupId) to specify the storeGroupId + * + * @throws PDException + */ + public void balancePartition() throws PDException { + Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder() + .setHeader(header) + .setMode( + Pdpb.OperationMode.Auto) + .build(); + Pdpb.MovePartitionResponse response = getStub().movePartition(request); + handleResponseError(response.getHeader()); + } + + /** + * Migrate partitions in manual mode + * // Working mode + * // Auto: Automatic transfer to the same number of partitions per Store + * // Expert: Expert Mode, Specifier is required transferParams + * + * @param params Designation transferParams, expert mode, request source store / target store + * in the same store group + * @throws PDException + */ + public void movePartition(Pdpb.OperationMode mode, List params) throws + PDException { + Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .addAllParam(params).build(); + Pdpb.MovePartitionResponse response = getStub().movePartition(request); + handleResponseError(response.getHeader()); + } + + public void reportTask(MetaTask.Task task) throws PDException { + Pdpb.ReportTaskRequest request = Pdpb.ReportTaskRequest.newBuilder() + .setHeader(header) + .setTask(task).build(); + Pdpb.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request); +======== + * 平衡分区 + * @param mode auto or expert + * @param storeGroupId for auto + * @param params for expert + * @throws PDException errors occurs + */ + public void balancePartition(ClusterOp.OperationMode mode, int storeGroupId, + List params) throws PDException { + ClusterOp.MovePartitionRequest request = 
ClusterOp.MovePartitionRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .setStoreGroupId(storeGroupId) + .addAllParam(params) + .build(); + ClusterOp.MovePartitionResponse response = blockingUnaryCall(PDGrpc.getMovePartitionMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + } + + public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException { + Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .setGraphName(graph) + .setPartitionId(partId) + .build(); + Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request); +======== + .setHeader(header) + .setGraphName(graph) + .setPartitionId(partId).build(); + // Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request); + Pdpb.GetPartitionStatsResponse response = blockingUnaryCall(PDGrpc.getGetPartitionStatsMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + return response.getPartitionStats(); + } + + /** + * Balance the number of leaders in different stores + */ + public void balanceLeaders() throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.BalanceLeadersRequest request = Pdpb.BalanceLeadersRequest.newBuilder() + .setHeader(header) + .build(); + Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request); +======== + ClusterOp.BalanceLeadersRequest request = ClusterOp.BalanceLeadersRequest.newBuilder() + .setHeader(header) + .build(); + // Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request); + 
ClusterOp.BalanceLeadersResponse response = blockingUnaryCall(PDGrpc.getBalanceLeadersMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + } + + /** + * Remove the store from the PD + */ + public Metapb.Store delStore(long storeId) throws PDException { + Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + .setHeader(header) + .setStoreId(storeId) + .build(); + Pdpb.DetStoreResponse response = getStub().delStore(request); +======== + .setHeader(header) + .setStoreId(storeId) + .build(); + // Pdpb.DetStoreResponse response = getStub().delStore(request); + Pdpb.DetStoreResponse response = blockingUnaryCall(PDGrpc.getDelStoreMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + return response.getStore(); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * Compaction on rocksdb as a whole +======== + * 对rocksdb整体进行compaction +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @throws PDException + */ + public void dbCompaction() throws PDException { + ClusterOp.DbCompactionRequest request = ClusterOp.DbCompactionRequest + .newBuilder() + .setHeader(header) + .build(); + // Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + ClusterOp.DbCompactionResponse response = blockingUnaryCall(PDGrpc.getDbCompactionMethod(), request); + handleResponseError(response.getHeader()); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * Compaction on rocksdb specified tables +======== + * 
对rocksdb指定表进行compaction +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @param tableName + * @throws PDException + */ + public void dbCompaction(String tableName) throws PDException { + ClusterOp.DbCompactionRequest request = ClusterOp.DbCompactionRequest + .newBuilder() + .setHeader(header) + .setTableName(tableName) + .build(); + // Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + ClusterOp.DbCompactionResponse response = blockingUnaryCall(PDGrpc.getDbCompactionMethod(), request); + handleResponseError(response.getHeader()); + } + + /** + * Merge partitions to reduce the current partition to toCount + * + * @param toCount The number of partitions that can be scaled down + * @throws PDException + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + public void combineCluster(int toCount) throws PDException { + Pdpb.CombineClusterRequest request = Pdpb.CombineClusterRequest +======== + public void combineCluster(int shardGroupId, int toCount) throws PDException { + ClusterOp.CombineClusterRequest request = ClusterOp.CombineClusterRequest +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + .newBuilder() + .setHeader(header) + .setStoreGroupId(shardGroupId) + .setToCount(toCount) + .build(); + ClusterOp.CombineClusterResponse response = blockingUnaryCall(PDGrpc.getCombineClusterMethod(), request); + handleResponseError(response.getHeader()); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * Scaling a single image to toCount is similar to splitting to ensure that the number of + * partitions in the same store group is the same. 
+ * If you have special requirements, you can consider migrating to other groups +======== + * 将单图缩容到 toCount个 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @param graphName graph name + * @param toCount target count + * @throws PDException + */ + public void combineGraph(String graphName, int toCount) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.CombineGraphRequest request = Pdpb.CombineGraphRequest +======== + ClusterOp.CombineGraphRequest request = ClusterOp.CombineGraphRequest +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + .newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.CombineGraphResponse response = getStub().combineGraph(request); + handleResponseError(response.getHeader()); + } + + public void deleteShardGroup(int groupId) throws PDException { + Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest + .newBuilder() + .setHeader(header) + .setGroupId(groupId) + .build(); + Pdpb.DeleteShardGroupResponse response = + blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request); + +======== + // Pdpb.CombineGraphResponse response = getStub().combineGraph(request); + ClusterOp.CombineGraphResponse response = blockingUnaryCall(PDGrpc.getCombineGraphMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + * Used for the store's shard list rebuild +======== + * 用于 store的 shard list重建 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + * + * @param groupId shard group id + * @param shards shard list, delete when shards size is 0 + */ + public void updateShardGroupOp(int groupId, List shards) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request); +======== + ClusterOp.ChangeShardRequest request = ClusterOp.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + // Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request); + ClusterOp.ChangeShardResponse response = blockingUnaryCall(PDGrpc.getUpdateShardGroupOpMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(response.getHeader()); + } + + /** + * invoke fireChangeShard command + * + * @param groupId shard group id + * @param shards shard list + */ + public void changeShard(int groupId, List shards) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + Pdpb.ChangeShardResponse response = getStub().changeShard(request); + handleResponseError(response.getHeader()); + } + + public ClientCache getCache() { + return cache; + } + + public CacheResponse getClientCache() throws PDException { + GetGraphRequest request = GetGraphRequest.newBuilder().setHeader(header).build(); + CacheResponse cache = getStub().getCache(request); + handleResponseError(cache.getHeader()); + return cache; + } + + 
public CachePartitionResponse getPartitionCache(String graph) throws PDException { + GetGraphRequest request = + GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build(); + CachePartitionResponse ps = getStub().getPartitions(request); +======== + ClusterOp.ChangeShardRequest request = ClusterOp.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + // Pdpb.ChangeShardResponse response = getStub().changeShard(request); + ClusterOp.ChangeShardResponse response = blockingUnaryCall(PDGrpc.getChangeShardMethod(), request); + handleResponseError(response.getHeader()); + } + + public Pdpb.CacheResponse getClientCache() throws PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(header).build(); + // Pdpb.CacheResponse cache = getStub().getCache(request); + Pdpb.CacheResponse cache = blockingUnaryCall(PDGrpc.getGetCacheMethod(), request); + handleResponseError(cache.getHeader()); + return cache; + } + + public Pdpb.CachePartitionResponse getPartitionCache(String graph) throws PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build(); + // Pdpb.CachePartitionResponse ps = getStub().getPartitions(request); + Pdpb.CachePartitionResponse ps = blockingUnaryCall(PDGrpc.getGetPartitionsMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java + handleResponseError(ps.getHeader()); + return ps; + } + + public void updatePdRaft(String raftConfig) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java + Pdpb.UpdatePdRaftRequest request = Pdpb.UpdatePdRaftRequest.newBuilder() + .setHeader(header) + .setConfig(raftConfig) + .build(); + Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request); + handleResponseError(response.getHeader()); + } + + public 
interface PDEventListener { + + void onStoreChanged(NodeEvent event); + + void onPartitionChanged(PartitionEvent event); + + void onGraphChanged(WatchResponse event); + + default void onShardGroupChanged(WatchResponse event) { + } + + } + + static class StubProxy { + + private final LinkedList hostList = new LinkedList<>(); + private volatile PDGrpc.PDBlockingStub stub; + private String leader; + + public StubProxy(String[] hosts) { + for (String host : hosts) { + if (!host.isEmpty()) { + hostList.offer(host); + } + } + } + + public String nextHost() { + String host = hostList.poll(); + hostList.offer(host); + return host; + } + + public void set(PDGrpc.PDBlockingStub stub) { + this.stub = stub; + } + + public PDGrpc.PDBlockingStub get() { + return this.stub; + } + + public String getHost() { + return hostList.peek(); + } + + public int getHostCount() { + return hostList.size(); + } + + public String getLeader() { + return leader; + } + + public void setLeader(String leader) { + this.leader = leader; + } + } +======== + ClusterOp.UpdatePdRaftRequest request = ClusterOp.UpdatePdRaftRequest.newBuilder() + .setHeader(header) + .setConfig(raftConfig) + .build(); + // Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request); + ClusterOp.UpdatePdRaftResponse response = blockingUnaryCall(PDGrpc.getUpdatePdRaftMethod(), request); + handleResponseError(response.getHeader()); + } + + public long submitBuildIndexTask(Metapb.BuildIndexParam param) throws PDException { + Pdpb.IndexTaskCreateRequest request = Pdpb.IndexTaskCreateRequest.newBuilder() + .setHeader(header) + .setParam(param) + .build(); + // var response = getStub().submitTask(request); + var response = blockingUnaryCall(PDGrpc.getSubmitIndexTaskMethod(), request); + handleResponseError(response.getHeader()); + return response.getTaskId(); + } + + public long submitBackupGraphTask(String sourceGraph, String targetGraph) throws PDException { + Pdpb.BackupGraphRequest request = 
Pdpb.BackupGraphRequest.newBuilder() + .setGraphName(sourceGraph) + .setTargetGraphName(targetGraph) + .build(); + // var response = getStub().submitTask(request); + var response = blockingUnaryCall(PDGrpc.getSubmitBackupGraphTaskMethod(), request); + handleResponseError(response.getHeader()); + return response.getTaskId(); + } + + public Pdpb.TaskQueryResponse queryBuildIndexTaskStatus(long taskId) throws PDException { + Pdpb.TaskQueryRequest request = Pdpb.TaskQueryRequest.newBuilder() + .setHeader(header) + .setTaskId(taskId) + .build(); + // var response = getStub().queryTaskState(request); + var response = blockingUnaryCall(PDGrpc.getQueryTaskStateMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Pdpb.TaskQueryResponse retryTask(long taskId) throws PDException { + Pdpb.TaskQueryRequest request = Pdpb.TaskQueryRequest.newBuilder() + .setHeader(header) + .setTaskId(taskId) + .build(); + // var response = getStub().retryIndexTask(request); + var response = blockingUnaryCall(PDGrpc.getRetryTaskMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Pdpb.GraphStatsResponse getGraphStats(String graphName) throws PDException { + Pdpb.GetGraphRequest request = + Pdpb.GetGraphRequest.newBuilder().setHeader(header).setGraphName(graphName).build(); + // Pdpb.GraphStatsResponse graphStats = getStub().getGraphStats(request); + Pdpb.GraphStatsResponse graphStats = blockingUnaryCall(PDGrpc.getGetGraphStatsMethod(), request); + handleResponseError(graphStats.getHeader()); + return graphStats; + } + + /** + * 返回startKey和endKey跨越的所有分区信息 + * + * @param graphName + * @param startKey + * @param endKey + * @return + * @throws PDException + */ + public List> scanPartitions(String graphName, byte[] startKey, + byte[] endKey) throws PDException { + List> partitions = new ArrayList<>(); + KVPair startPartShard = getPartition(graphName, startKey); + KVPair endPartShard = getPartition(graphName, 
endKey); + if (startPartShard == null || endPartShard == null) { + return null; + } + partitions.add(startPartShard); + while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey() + && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE /*排除最后一个分区*/) { + startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey()); + partitions.add(startPartShard); + } + return partitions; + } + + /** + * 查询Key所属分区信息 + * + * @param graphName + * @param key + * @return + * @throws PDException + */ + public KVPair getPartition(String graphName, byte[] key) throws PDException { + // 先查cache,cache没有命中,在调用PD + KVPair partShard = cache.getPartitionByKey(graphName, key); + partShard = getKvPair(graphName, key, partShard); + return partShard; + } + + public KVPair getPartition(String graphName, byte[] key, int code) throws PDException { + KVPair partShard = cache.getPartitionByCode(graphName, code); + partShard = getKvPair(graphName, key, partShard); + return partShard; + } + + /** + * Hugegraph-store调用,更新缓存 + * + * @param partition + */ + public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) { + if (config.isEnableCache()) { + cache.update(partition.getGraphName(), partition.getId(), partition); + cache.updateLeader(partition.getId(), leader); + } + } + + /** + * Hugegraph server 调用,Leader发生改变,更新缓存 + */ + public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) { + KVPair partShard = null; + try { + partShard = this.getPartitionById(graphName, partId); + + if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) { + var shardGroup = this.getShardGroup(partId); + Metapb.Shard shard = null; + List shards = new ArrayList<>(); + + for (Metapb.Shard s : shardGroup.getShardsList()) { + if (s.getStoreId() == leaderStoreId) { + shard = s; + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Leader).build()); + } else { + 
shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Follower).build()); + } + } + + if (config.isEnableCache()) { + if (shard == null) { + // 分区的shard中未找到leader,说明分区发生了迁移 + cache.removePartition(graphName, partId); + } else { + cache.updateLeader(partId, shard); + } + } + } + } catch (PDException e) { + log.error("getPartitionException: {}", e.getMessage()); + } + } + + public Metapb.StoreGroup createStoreGroup(int groupId, String name, int partitionCount) throws PDException { + StoreGroup.CreateStoreGroupRequest request = StoreGroup.CreateStoreGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .setName(name) + .setPartitionCount(partitionCount) + .build(); + + StoreGroup.CreateStoreGroupResponse response = blockingUnaryCall(PDGrpc.getCreateStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroup(); + } + + public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException { + StoreGroup.GetStoreGroupRequest request = StoreGroup.GetStoreGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .build(); + StoreGroup.GetStoreGroupResponse response = blockingUnaryCall(PDGrpc.getGetStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroup(); + } + + public List getAllStoreGroups() throws PDException { + StoreGroup.GetAllStoreGroupRequest request = StoreGroup.GetAllStoreGroupRequest.newBuilder() + .setHeader(header).build(); + StoreGroup.GetAllStoreGroupResponse response = blockingUnaryCall(PDGrpc.getGetAllStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroupsList(); + } + + public Metapb.StoreGroup updateStoreGroup(int groupId, String name) throws PDException { + StoreGroup.UpdateStoreGroupRequest request = StoreGroup.UpdateStoreGroupRequest.newBuilder().setHeader(header) + .setGroupId(groupId) + .setName(name) + .build(); + 
StoreGroup.UpdateStoreGroupResponse response = blockingUnaryCall(PDGrpc.getUpdateStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroup(); + } + + public List getStoresByStoreGroup(int groupId) throws PDException { + StoreGroup.GetGroupStoresRequest request = StoreGroup.GetGroupStoresRequest.newBuilder() + .setHeader(header).setStoreGroupId(groupId).build(); + StoreGroup.GetGroupStoresResponse response = blockingUnaryCall(PDGrpc.getGetStoresByStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + } + + public boolean updateStoreGroupRelation(long storeId, int groupId) throws PDException { + StoreGroup.UpdateStoreGroupRelationRequest request = StoreGroup.UpdateStoreGroupRelationRequest.newBuilder() + .setHeader(header).setStoreId(storeId) + .setStoreGroupId(groupId).build(); + var response = blockingUnaryCall(PDGrpc.getUpdateStoreGroupRelationMethod(), request); + handleResponseError(response.getHeader()); + return response.getSuccess(); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java new file mode 100644 index 0000000000..aace7d144c --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegator.java @@ -0,0 +1,213 @@ +package org.apache.hugegraph.pd.client.impl; + +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.client.rpc.Invoker; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.common.ErrorType; + +import 
io.grpc.MethodDescriptor; +import io.grpc.stub.StreamObserver; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public final class StreamDelegator { + + private final Invoker invoker; + private final MethodDescriptor methodDesc; + private final AtomicReference requestHolder = new AtomicReference<>(); + private final AtomicReference state = + new AtomicReference<>(StreamDelegatorState.IDLE); + private final AtomicReference senderHolder = new AtomicReference<>(); + private final AtomicReference actionHolder = new AtomicReference<>(); + private final AtomicBoolean connecting = new AtomicBoolean(); + private final AtomicBoolean autoReconnect = new AtomicBoolean(true); + private final AtomicReference lastError = new AtomicReference<>(); + @Getter + private final String name; + private Consumer dataHandler = this::defaultHandler; + private Consumer errorHandler = this::defaultHandler; + private Consumer completeHandler = this::defaultHandler; + + public StreamDelegator(String delegatorName, Invoker invoker, MethodDescriptor methodDesc) { + HgAssert.isArgumentValid(delegatorName, "delegator name"); + HgAssert.isArgumentNotNull(invoker, "stub invoker"); + HgAssert.isArgumentNotNull(methodDesc, "methodDesc"); + this.name = delegatorName; + this.invoker = invoker; + this.methodDesc = methodDesc; + } + + public void close() { + StreamDelegatorSender sender = this.senderHolder.get(); + if (sender != null) { + sender.close(); + } + } + + private void defaultHandler(T t) { + if (t instanceof Throwable) { + error("Default handler received an error:", t); + this.lastError.set((Throwable) t); + } else if (t != null) { + info("Default handler received a stream data: {}", t); + } + } + + public void listen(ReqT request, Consumer dataHandler) { + if (!this.state.compareAndSet(StreamDelegatorState.IDLE, StreamDelegatorState.LISTENING)) { + info("It's not in the idle StreamDelegatorState, skip listening.", new Object[0]); + throw new IllegalStateException( + "It's 
not in the idle StreamDelegatorState and not via the 'listening' method. "); + } + if (!this.requestHolder.compareAndSet(null, request)) { + info("Already connected, skip listening.", new Object[0]); + throw new IllegalStateException("Already connected, UNKNOWN StreamDelegatorState!"); + } + this.dataHandler = dataHandler; + try { + listen2Server(request, new StreamDelegatorReceiver<>(this)); + } catch (RuntimeException e) { + this.requestHolder.set(null); + this.state.set(StreamDelegatorState.IDLE); + throw e; + } + } + + public StreamDelegatorSender link(Consumer dataHandler) { + HgAssert.isArgumentNotNull(dataHandler, "data handler"); + if (!this.state.compareAndSet(StreamDelegatorState.IDLE, StreamDelegatorState.LINKING)) { + info("It's not in the idle StreamDelegatorState, skip linking.", new Object[0]); + throw new IllegalStateException( + "It's not in the idle StreamDelegatorState and not via the 'linking' method."); + } + if (this.senderHolder.get() != null) { + info("Already connected, skip linking.", new Object[0]); + return this.senderHolder.get(); + } + if (!this.senderHolder.compareAndSet(null, new StreamDelegatorSender<>(this))) { + info("Already connected, skip linking.", new Object[0]); + return this.senderHolder.get(); + } + this.dataHandler = dataHandler; + try { + this.senderHolder.get().setReqStream(this, link2Server(new StreamDelegatorReceiver<>(this))); + } catch (Exception e) { + this.senderHolder.set(null); + this.state.set(StreamDelegatorState.IDLE); + throw e; + } + return this.senderHolder.get(); + } + + private StreamObserver link2Server(StreamObserver receiver) { + try { + return this.invoker.streamingCall(this.methodDesc, receiver); + } catch (Exception e) { + error("Failed to establish a link to the server, method type: {}, caused by: ", methodDesc, e); + throw new PDRuntimeException(ErrorType.ERROR_VALUE, e); + } + } + + private void listen2Server(ReqT request, StreamObserver receiver) { + try { + 
this.invoker.serverStreamingCall(this.methodDesc, request, receiver); + } catch (Exception e) { + error("Failed to set up a listening connection to the server, method type: {}, caused by: ", + methodDesc, e); + throw new PDRuntimeException(ErrorType.ERROR_VALUE, e); + } + } + + public void reconnect() { + reconnect(null); + } + + public void reconnect(Throwable t) { + if (this.connecting.compareAndSet(false, true)) { + if (t != null) { + log.warn("Received an error and trying to reconnect: ", t); + } + try { + AtomicBoolean connected = new AtomicBoolean(false); + int count = 0; + while (!connected.get()) { + try { + count++; + StreamDelegatorSender sender = this.senderHolder.get(); + ReqT request = this.requestHolder.get(); + if (sender == null && request == null) { + info("The sender and request are both null, skip reconnecting."); + return; + } + if (sender != null) { + info("The [{}]th attempt to [linking]...", count); + sender.updateReqStream(link2Server(new StreamDelegatorReceiver<>(this))); + } else { + info("The [{}]th attempt to [listening]...", count); + listen2Server(request, new StreamDelegatorReceiver<>(this)); + } + connected.set(true); + break; + } catch (Exception e) { + try { + error("Failed to reconnect, waiting [{}] seconds for the next attempt.", 3); + connected.set(false); + Thread.sleep(3000L); + } catch (InterruptedException ex) { + error("Failed to sleep thread and cancel the reconnecting process.", e); + } + } + } + if (connected.get()) { + info("Reconnect server successfully!"); + } else { + error("Reconnect server failed!"); + } + } catch (Exception e) { + warn("Failed to reconnect:", e); + } finally { + this.connecting.set(false); + } + } + } + + protected void onNext(RespT res) { + this.dataHandler.accept(res); + } + + protected void onError(Throwable t) { + if (this.autoReconnect.get()) { + this.invoker.reconnect(); + } else { + log.warn(this.name + " received an error and trying to reconnect: ", t); + } + } + + protected void 
onCompleted() { + this.completeHandler.accept(null); + } + + protected void resetState() { + this.senderHolder.set(null); + this.requestHolder.set(null); + this.state.set(StreamDelegatorState.IDLE); + } + + protected void info(String msg, Object... args) { + log.info("[" + this.name + "] " + msg, args); + } + + protected void error(String msg, Object... args) { + log.error("[" + this.name + "] " + msg, args); + } + + protected void warn(String msg, Object... args) { + log.warn("[" + this.name + "] " + msg, args); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java new file mode 100644 index 0000000000..371cdcd9fb --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorReceiver.java @@ -0,0 +1,31 @@ +package org.apache.hugegraph.pd.client.impl; + +import java.util.concurrent.ExecutorService; + +import org.apache.hugegraph.pd.client.support.PDExecutors; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class StreamDelegatorReceiver implements StreamObserver { + + private final StreamDelegator delegator; + private static ExecutorService connectExecutor = + PDExecutors.newDiscardPool("on-error", 8, 8, Integer.MAX_VALUE); + public StreamDelegatorReceiver(StreamDelegator delegator) { + this.delegator = delegator; + } + + public void onNext(RespT res) { + this.delegator.onNext(res); + } + + public void onError(Throwable t) { + connectExecutor.submit(() -> this.delegator.onError(t)); + } + + public void onCompleted() { + this.delegator.onCompleted(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java new file mode 100644 index 0000000000..0502a4998f --- /dev/null +++ 
b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorSender.java @@ -0,0 +1,99 @@ +package org.apache.hugegraph.pd.client.impl; + +import java.io.Closeable; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDRuntimeException; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class StreamDelegatorSender implements Closeable { + + private final AtomicBoolean isClosed = new AtomicBoolean(true); + private AtomicReference> observer = new AtomicReference<>(); + private Consumer reconnectedConsumer; + private StreamDelegator delegator; + + public StreamDelegatorSender(StreamDelegator delegator) { + this.delegator = delegator; + } + + protected StreamDelegatorSender setReqStream(StreamDelegator delegator, + StreamObserver reqStream) { + this.delegator = delegator; + this.observer.set(reqStream); + this.isClosed.set(false); + return this; + } + + protected StreamDelegatorSender updateReqStream(StreamObserver reqStream) { + complete(); + this.observer.set(reqStream); + this.isClosed.set(false); + reconnect(null); + return this; + } + + private void reconnect(Void e) { + if (this.reconnectedConsumer != null) { + try { + this.reconnectedConsumer.accept(e); + } catch (Exception ex) { + log.error("Failed to invoke [ reconnectedConsumer ], caused by: ", ex); + } + } else { + log.info("Received a reconnection complete event."); + } + } + + public void onReconnected(Consumer reconnectedConsumer) { + HgAssert.isArgumentNotNull(reconnectedConsumer, "connectedConsumer"); + this.reconnectedConsumer = reconnectedConsumer; + } + + public void send(ReqT t) { + HgAssert.isArgumentNotNull(t, "request"); + try { + this.observer.get().onNext(t); + } catch (Throwable e) { + log.error("Failed to send to server, caused by: ", e); + 
this.delegator.reconnect(); + throw new PDRuntimeException(-1, e); + } + } + + public void error(String error) { + if (!this.isClosed.compareAndSet(false, true)) { + log.warn("Aborted sending the error due the closure of the connection."); + return; + } + this.delegator.resetState(); + Throwable t = new Throwable(error); + log.error("Sender failed to invoke [onError], caused by: ", t); + this.observer.get().onError(t); + } + + public void close() { + this.delegator.resetState(); + if (!this.isClosed.compareAndSet(false, true)) { + return; + } + complete(); + } + + protected void complete() { + try { + StreamObserver observer = this.observer.get(); + if (observer != null) { + observer.onCompleted(); + } + } catch (Throwable e) { + log.error("Sender failed to invoke [onCompleted], caused by: ", e); + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java new file mode 100644 index 0000000000..62bffca6d6 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/StreamDelegatorState.java @@ -0,0 +1,9 @@ +package org.apache.hugegraph.pd.client.impl; + +/** + * @author zhangyingjie + * @date 2024/1/31 + **/ +public enum StreamDelegatorState { + IDLE, LINKING, LISTENING, PUSHING; +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java new file mode 100644 index 0000000000..3d84674118 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/Authentication.java @@ -0,0 +1,72 @@ +package org.apache.hugegraph.pd.client.interceptor; + +import org.apache.commons.lang3.StringUtils; + +import org.apache.hugegraph.pd.client.rpc.ConnectionManagers; +import org.apache.hugegraph.pd.common.Cache; +import org.apache.hugegraph.pd.common.Consts; + 
+import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2023/4/25 + **/ +@Slf4j +public class Authentication implements ClientInterceptor { + + private static Cache cache = new Cache(); + private static long ttl = 3600L; + private String authority; + private String name; + + public Authentication(String userName, String authority) { + assert !StringUtils.isEmpty(userName); + this.name = userName; + this.authority = authority; + } + + public ClientCall interceptCall(MethodDescriptor method, + CallOptions callOptions, Channel next) { + return new SimpleForwardingClientCall<>(next.newCall(method, callOptions)) { + public void sendMessage(ReqT message) { + super.sendMessage(message); + } + + public void start(Listener listener, Metadata headers) { + if (StringUtils.isEmpty(authority) || + StringUtils.isEmpty(name)) { + throw new RuntimeException("invalid user name or password,access denied"); + } + headers.put(Consts.CREDENTIAL_KEY, authority); + String token = cache.get(name); + if (token != null) { + headers.put(Consts.TOKEN_KEY, cache.get(name)); + } + SimpleForwardingClientCallListener callListener = + new SimpleForwardingClientCallListener(listener) { + public void onHeaders(Metadata headers) { + super.onHeaders(headers); + String t = headers.get(Consts.TOKEN_KEY); + if (!StringUtils.isEmpty(t)) { + cache.put(name, t, ttl); + } + String leader = headers.get(Consts.LEADER_KEY); + if (!StringUtils.isEmpty(leader)) { + ConnectionManagers.getInstance().reset(leader); + } + } + }; + super.start(callListener, headers); + } + }; + } +} diff --git 
a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java new file mode 100644 index 0000000000..5eb26a78d5 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/interceptor/AuthenticationException.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.client.interceptor; + +/** + * @author zhangyingjie + * @date 2023/8/7 + **/ +public class AuthenticationException extends RuntimeException{ + + public AuthenticationException(String msg) { + super(msg); + } + + public AuthenticationException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java new file mode 100644 index 0000000000..527916ec1a --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/LeaderChangeListener.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.client.listener; + +import org.apache.hugegraph.pd.client.rpc.ConnectionManagers; + +/** + * @author zhangyingjie + * @date 2024/1/31 + **/ +public interface LeaderChangeListener { + void onLeaderChanged(String leaderAddress); + + default void onPeerChanged(String[] peers) { + ConnectionManagers managers = ConnectionManagers.getInstance(); + managers.resetPeers(peers); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java new file mode 100644 index 0000000000..e83f56678c --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/listener/PDEventListener.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.client.listener; + +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import 
org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.PartitionEvent; + +/** + * @author zhangyingjie + * @date 2023/9/14 + **/ +public interface PDEventListener { + void onStoreChanged(NodeEvent event); + + void onPartitionChanged(PartitionEvent event); + + void onGraphChanged(WatchResponse event); + + default void onShardGroupChanged(WatchResponse event) { + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java new file mode 100644 index 0000000000..4abb313746 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/AnyInvoker.java @@ -0,0 +1,88 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.Objects; +import java.util.function.Function; +import java.util.function.Predicate; + +import org.apache.hugegraph.pd.client.interceptor.Authentication; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; + +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.MethodDescriptor; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import io.grpc.stub.ClientCalls; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2023/12/21 + */ +@Slf4j +public class AnyInvoker extends Invoker { + + public AnyInvoker(ConnectionManager cm, Function asyncStubCreator, + Function blockingStubCreator) { + super(cm, asyncStubCreator, blockingStubCreator); + } + + private ClientCall newBlockingClientCall(MethodDescriptor method, + Channel channel) { + Authentication auth = new Authentication(config.getUserName(), config.getAuthority()); + return auth.interceptCall(method, getBlockingCallOptions(), channel); + } + + public RespT blockingCall(MethodDescriptor method, ReqT req) throws + PDException { + return blockingCall(method, req, resp -> true); + } + + 
public RespT blockingCall(MethodDescriptor method, ReqT req, + long timeout) throws PDException { + HgAssert.isArgumentNotNull(method, "method"); + HgAssert.isArgumentNotNull(req, "request"); + HgAssert.isTrue((timeout >= 0L), "timeout must be positive"); + return parallelCall( + c -> ClientCalls.blockingUnaryCall(c, method, getBlockingCallOptions(timeout), req), + resp -> true); + } + + public RespT blockingCall(MethodDescriptor method, ReqT req, + Predicate predicate) throws PDException { + HgAssert.isArgumentNotNull(predicate, "Predicate"); + return parallelCall(c -> ClientCalls.blockingUnaryCall(c, method, getBlockingCallOptions(), req), + predicate); + } + + public void serverStreamingCall(MethodDescriptor methodDescriptor, ReqT request, + StreamObserver responseObserver) throws PDException { + throw new UnsupportedOperationException("Not support server streaming call"); + } + + public StreamObserver streamingCall(MethodDescriptor method, + StreamObserver responseObserver) throws + PDException { + throw new UnsupportedOperationException("Not support streaming call"); + } + + private T parallelCall(Function mapper, Predicate predicate) throws PDException { + return this.cm.getParallelChannelStream() + .map(errorShutdown(mapper)) + .filter(Objects::nonNull) + .filter(predicate) + .findAny() + .orElse(null); + } + + private Function errorShutdown(Function mapper) throws PDException { + return channel -> { + try { + return mapper.apply(channel); + } catch (Exception exception) { + return null; + } + }; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java new file mode 100644 index 0000000000..a5369be3e3 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Channels.java @@ -0,0 +1,193 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.ArrayList; +import java.util.List; +import 
java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import org.apache.hugegraph.pd.client.PDConfig; + +import io.grpc.Channel; +import io.grpc.ConnectivityState; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public final class Channels { + private static ConcurrentHashMap channels = new ConcurrentHashMap<>(); + + /** + * Retrieve a channel with a specific target. If the channel is null or shutdown, create a new one; + * + * @param target + * @return + */ + public static ManagedChannel getChannel(String target) { + ManagedChannel channel = channels.get(target); + if (!isValidChannel(channel)) { + synchronized (channels) { + channel = channels.get(target); + if (!isValidChannel(channel)) { + if (channel != null) { + log.info("get channel {}, state:{}", channel, channel.getState(false)); + } + channel = resetChannel(target, channel); + } + } + } + return channel; + } + + private static ManagedChannel resetChannel(String target, ManagedChannel channel) { + closeChannel(channel); + channel = ManagedChannelBuilder.forTarget(target) + .maxInboundMessageSize(PDConfig.getInboundMessageSize()) + .usePlaintext().build(); + channels.put(target, channel); + log.info("Because the channel is not available, create a new one for {}", target); + return channel; + } + + /** + * Validate the channel weather it is valid. + * + * @param channel + * @return true if the channel is valid, otherwise false. + */ + public static boolean isValidChannel(ManagedChannel channel) { + if (channel == null || channel.isShutdown() || channel.isTerminated()) { + return false; + } + ConnectivityState state = channel.getState(false); + if (state == ConnectivityState.READY || state == ConnectivityState.IDLE) { + /* Optimistic judgment for increasing the efficiency. 
*/ + return true; + } + /* Trying to make a connection. */ + state = channel.getState(true); + if (state == ConnectivityState.IDLE || state == ConnectivityState.READY) { + return true; + } else { + // log.info("Channel {} is invalid, state: {}", channel, state); + return false; + } + } + + /** + * Return true if the channel io is broken and need to shut down now. + * + * @param throwable + * @return + */ + public static boolean isIoBrokenError(Throwable throwable) { + if (throwable instanceof StatusRuntimeException) { + StatusRuntimeException e = (StatusRuntimeException) throwable; + return e.getStatus().getCode() == Status.Code.UNAVAILABLE; + } + + return false; + } + + public static boolean canNotWork(Throwable throwable) { + if (throwable instanceof StatusRuntimeException) { + StatusRuntimeException e = (StatusRuntimeException) throwable; + Status.Code code = e.getStatus().getCode(); + return code == Status.Code.UNAVAILABLE || code == Status.Code.DEADLINE_EXCEEDED; + } + return false; + } + /** + * Retrieves all channels + * + * @return non-null collection + */ + public static List getAllChannels() { + return channels.values().stream().collect(Collectors.toList()); + } + + /** + * Closing all channels + * + * @return + */ + public static boolean closeAllChannels() { + /* Clone the list to avoid closing the new channels. */ + List buff = new ArrayList<>(channels.values()); + return buff.stream().parallel().allMatch(Channels::closeChannel); + } + + /** + * Closing a channel. 
+ * + * @param channel + * @return + */ + public static boolean closeChannel(ManagedChannel channel) { + if (channel == null || channel.isShutdown() || channel.isTerminated()) { + return true; + } + log.info("Closing the channel: {}", channel); + try { + channel.shutdown().awaitTermination(1000, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + log.warn("Failed to close channel, caused by InterruptedException:", e); + //Thread.currentThread().interrupt(); + return false; + } + while (!channel.isShutdown()) { + try { + log.info("Waiting for channel to be shutdown: {}", channel); + TimeUnit.MILLISECONDS.sleep(1000); + } catch (InterruptedException e) { + log.warn("Failed to close channel, caused by InterruptedException:", e); + //Thread.currentThread().interrupt(); + return false; + } + } + return true; + } + + /** + * Invoking shutdownNow on a channel and waiting until the timeout occurs. + * If the channel is not a ManagedChannel, return directly with no action. + * + * @param channel + * @param timeout timeout in milliseconds + */ + public static void shutdownNow(Channel channel, long timeout) { + if (channel == null) { + return; + } + + if (!(channel instanceof ManagedChannel)) { + log.info("Channel is not a ManagedChannel, return."); + return; + } + + ManagedChannel managedChannel = (ManagedChannel) channel; + if (managedChannel.isShutdown() || managedChannel.isTerminated()) { + return; + } + + log.info("Shutting down the channel: {}", channel); + + try { + managedChannel.shutdownNow().awaitTermination(timeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + log.warn("Failed to shutdown channel, caused by InterruptedException:", e); + //Thread.currentThread().interrupt(); + return; + } + + } + + public static boolean isShutdown(ManagedChannel channel) { + if (channel == null || channel.isShutdown() || channel.isTerminated()) + return true; + return false; + } +} \ No newline at end of file diff --git 
a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java new file mode 100644 index 0000000000..17ead2e61b --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionClient.java @@ -0,0 +1,44 @@ +package org.apache.hugegraph.pd.client.rpc; + +import org.apache.hugegraph.pd.client.BaseClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.NoArg; + +/** + * @author zhangyingjie + * @date 2024/1/31 + **/ +public class ConnectionClient extends BaseClient { + public ConnectionClient(PDConfig pdConfig) { + super(pdConfig, PDGrpc::newStub, PDGrpc::newBlockingStub); + } + + public Pdpb.CacheResponse getClientCache() throws PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(this.header).build(); + Pdpb.CacheResponse cache = blockingUnaryCall(PDGrpc.getGetCacheMethod(), request); + handleErrors(cache.getHeader()); + return cache; + } + + public Pdpb.CachePartitionResponse getPartitionCache(String graph) throws PDException { + Pdpb.GetGraphRequest request = + Pdpb.GetGraphRequest.newBuilder().setHeader(this.header).setGraphName(graph).build(); + Pdpb.CachePartitionResponse ps = blockingUnaryCall(PDGrpc.getGetPartitionsMethod(), request); + handleErrors(ps.getHeader()); + return ps; + } + + public Pdpb.GetAllGrpcAddressesResponse getPdAddressesCache() throws PDException { + NoArg request = NoArg.newBuilder().build(); + Pdpb.GetAllGrpcAddressesResponse response = + blockingUnaryCall(PDGrpc.getGetAllGrpcAddressesMethod(), request); + handleErrors(response.getHeader()); + return response; + } + + public void onLeaderChanged(String leader) { + } +} diff --git 
a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java new file mode 100644 index 0000000000..0d8fbeac71 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManager.java @@ -0,0 +1,351 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Function; +import java.util.stream.Stream; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.commons.lang3.StringUtils; + +import org.apache.hugegraph.pd.client.BaseClient; +import org.apache.hugegraph.pd.client.ClientCache; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.client.PulseClient; +import org.apache.hugegraph.pd.client.interceptor.Authentication; +import org.apache.hugegraph.pd.client.support.PDExecutors; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.NoArg; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.watch.NodeEvent; +import org.apache.hugegraph.pd.watch.Watcher; +import org.apache.hugegraph.pd.watch.WatcherImpl; + +import io.grpc.Channel; +import 
io.grpc.ManagedChannel; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.AbstractBlockingStub; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2023/11/20 + * @version 1.0 + */ +@Slf4j +@ThreadSafe +public final class ConnectionManager { + + static final long WAITING_CREATE_SECONDS = 60L; + static final long WAITING_LEADER_SECONDS = 30L; + static final long TIMEOUT_SECONDS = 30L; + private static String emptyMsg = "Failed to get leader after " + WAITING_LEADER_SECONDS + " attempts"; + private static NoArg noArg = NoArg.newBuilder().build(); + @Getter + private final PDConfig config; + private final Set clients = ConcurrentHashMap.newKeySet(); + private final ExecutorService connectExecutor = + PDExecutors.newDiscardPool("reconnect", 8, 8, Integer.MAX_VALUE); + private final ReentrantReadWriteLock reconnectLock = new ReentrantReadWriteLock(true); + private final ReentrantLock resetLock = new ReentrantLock(true); + private final AtomicReference leader = new AtomicReference<>(); + private final Authentication auth; + ReentrantReadWriteLock.ReadLock readLock = this.reconnectLock.readLock(); + ReentrantReadWriteLock.WriteLock writeLock = this.reconnectLock.writeLock(); + private volatile ManagedChannel leaderChannel = null; + private Random random = new Random(); + private InvokeProxy proxy; + @Getter + private ClientCache cache; + @Getter + private PulseClient pulseClient; + @Getter + private Watcher watcher; + @Getter + private ConnectionClient connectionClient; + + ConnectionManager(PDConfig config) { + this.config = config; + String[] addresses = this.config.getServerHost().split(","); + this.proxy = new InvokeProxy(addresses); + this.auth = new Authentication(config.getUserName(), config.getAuthority()); + } + + public void init(PulseClient pulseClient, ConnectionClient connectionClient) { + this.pulseClient = pulseClient; + this.watcher = new WatcherImpl(pulseClient); + 
this.watcher.watchNode(this::onLeaderChanged); + this.cache = new ClientCache(connectionClient, this.watcher); + this.connectionClient = connectionClient; + this.watcher.watchPdPeers(this::onPdPeersChanged); + this.setProxyByPd(); + } + + public void setProxyByPd() { + try { + if (config.isAutoGetPdServers()) { + Pdpb.GetAllGrpcAddressesResponse response = connectionClient.getPdAddressesCache(); + if (response.getHeader().getError().getType() == ErrorType.OK) { + if (response.getAllowed()) { + this.proxy = new InvokeProxy(response.getAddressesList().toArray(new String[0])); + log.info("Get pd servers from cache: {}", response.getAddressesList()); + } + } else { + log.warn("Failed to get pd servers from cache, {}", response); + } + } + } catch (Exception e) { + log.warn("Failed to get pd servers from cache, ", e); + } + } + + private void onPdPeersChanged(PulseResponse pulseResponse) { + PdInstructionResponse ir = pulseResponse.getInstructionResponse(); + if (ir != null && ir.getInstructionType() == PdInstructionType.CHANGE_PEERS) { + updatePeers(ir.getPeersList().toArray(new String[0])); + } + } + + public String getLeaderFromPD() { + for (int i = 0; i < WAITING_LEADER_SECONDS; i++) { + String next = ""; + List hosts = this.proxy.getHosts(); + int hostCount = hosts.size(); + int startIndex = this.random.nextInt(hostCount); + int endIndex = startIndex + hostCount; + ManagedChannel channel = null; + long start = System.currentTimeMillis(); + PDGrpc.PDBlockingStub stub = null; + for (int j = startIndex; j < endIndex; j++) { + try { + if (j >= hostCount) { + next = hosts.get(j - hostCount); + } else { + next = hosts.get(j); + } + start = System.currentTimeMillis(); + channel = Channels.getChannel(next); + stub = PDGrpc.newBlockingStub(channel) + .withDeadlineAfter(TIMEOUT_SECONDS, TimeUnit.SECONDS) + .withInterceptors(this.auth); + Pdpb.GetLeaderGrpcAddressResponse response = stub.getLeaderGrpcAddress(noArg); + pulseClient.handleErrors(response.getHeader()); + String 
leader = response.getAddress(); + if (!StringUtils.isEmpty(leader)) { + log.info("Get leader address: {} from {}", leader, next); + return leader; + } + } catch (StatusRuntimeException se) { + if (i > 5) { + log.warn("Channel {} may be unavailable, state:{}, last:{} ms, option:{}, " + + "exception: ", + stub.getChannel(), + channel.getState(false), System.currentTimeMillis() - start, + stub.getCallOptions(), se.getStatus()); + } + } catch (Exception e) { + log.warn(String.format("Failed to get leader by address: %s, ", next), e); + } + } + try { + Thread.sleep(1000L); + } catch (Exception exception) { + } + } + return ""; + } + + public String getLeader() { + try { + return this.leader.get(); + } catch (Exception e) { + log.error("Failed to get leader address, caused by:", e); + return ""; + } + } + + public long getDefaultDeadline() { + return this.config.getGrpcTimeOut(); + } + + public void addClient(BaseClient client) { + this.clients.add(client); + } + + public void removeClient(BaseClient client) { + this.clients.remove(client); + } + + public void reconnect() { + reconnect("", false); + } + + public void reconnect(boolean recheck) { + reconnect("", recheck); + } + + public void reconnect(String leaderAddress, boolean recheck) { + long start = System.currentTimeMillis(); + boolean locked = this.writeLock.tryLock(); + if (locked) { + try { + if (recheck && !Channels.isShutdown(this.leaderChannel)) { + return; + } + if (StringUtils.isEmpty(leaderAddress)) { + leaderAddress = getLeaderFromPD(); + if (StringUtils.isEmpty(leaderAddress)) { + throw new PDRuntimeException(ErrorType.PD_RAFT_NOT_READY_VALUE, emptyMsg); + } else { + log.info("Get leader address: {}", leaderAddress); + } + } + update(leaderAddress, start, false); + } catch (Exception e) { + throw e; + } finally { + this.writeLock.unlock(); + } + } else { + boolean readLocked = false; + try { + readLocked = this.readLock.tryLock(WAITING_CREATE_SECONDS, TimeUnit.SECONDS); + } catch (Exception e) { + + } 
finally { + if (readLocked) { + this.readLock.unlock(); + } + } + } + } + + private void update(String leaderAddress, long start, boolean blocking) { + String currentAddress = this.leader.get(); + if (!leaderAddress.equals(currentAddress) || !Channels.isValidChannel(leaderChannel)) { + this.leader.set(leaderAddress); + this.leaderChannel = Channels.getChannel(leaderAddress); + String finalLeaderAddress = leaderAddress; + Future future = this.connectExecutor.submit(() -> resetClients(finalLeaderAddress)); + if (blocking) { + try { + future.get(); + } catch (Exception e) { + log.warn("Failed to reset clients, caused by:", e); + } + } + long end = System.currentTimeMillis(); + log.info("Reset leader from {} to {} in {} ms", currentAddress, leaderAddress, end - start); + } + } + + public void updatePeers(String[] endpoints) { + boolean locked = resetLock.tryLock(); + if (locked) { + try { + log.warn("Update PD peers to {}", Arrays.toString(endpoints)); + this.proxy = new InvokeProxy(endpoints); + reconnect(); + log.warn("PD peers updated."); + } finally { + resetLock.unlock(); + } + } + } + + public void close() { + PDExecutors.asyncCallback(() -> Boolean.valueOf(close(10L)), b -> { + if (b.booleanValue()) { + log.info("Closed all channels held by this PDConnectionManager."); + } else { + log.warn("Failed to close all channels held by this PDConnectionManager."); + } + }); + } + + public boolean close(long timeout) { + return PDExecutors.awaitTask(this::closeAllChannels, "Close all channels", + timeout).booleanValue(); + } + + private boolean closeAllChannels() { + return this.proxy.getHosts().parallelStream().map(Channels::getChannel) + .allMatch(Channels::closeChannel); + } + + public Stream getParallelChannelStream() { + return this.proxy.getHosts().parallelStream().map(Channels::getChannel) + .filter(Channels::isValidChannel); + } + + private boolean resetClients(String leaderAddress) { + for (BaseClient client : this.clients) { + try { + 
client.onLeaderChanged(leaderAddress); + } catch (Exception e) { + log.warn(String.format("Failed to let client %s reconnect, caused by:", client.getClass()), + e); + } + } + return true; + } + + public Channel getValidChannel() { + return this.proxy.getHosts().stream().map(Channels::getChannel).filter(Channels::isValidChannel) + .findFirst().orElse(null); + } + + public > T createAsyncStub(Function stubCreator) { + HgAssert.isArgumentNotNull(stubCreator, "The stub creator can't be null"); + return withAsyncParams(stubCreator.apply(getValidChannel())); + } + + public > T createBlockingStub(Function stubCreator) { + HgAssert.isArgumentNotNull(stubCreator, "The stub creator can't be null"); + return createBlockingStub(stubCreator, getValidChannel()); + } + + private > T createBlockingStub(Function creator, + Channel channel) { + return withBlockingParams(creator.apply(channel)); + } + + public > T withAsyncParams(T stub) { + HgAssert.isArgumentNotNull(stub, "The stub can't be null"); + return stub.withMaxInboundMessageSize(PDConfig.getInboundMessageSize()).withInterceptors(auth); + } + + public > T withBlockingParams(T stub) { + HgAssert.isArgumentNotNull(stub, "The stub can't be null"); + return stub.withMaxInboundMessageSize(PDConfig.getInboundMessageSize()).withInterceptors(auth); + } + + public Channel getLeaderChannel() { + if (this.leaderChannel == null || Channels.isShutdown(this.leaderChannel)) { + reconnect(true); + } + return this.leaderChannel; + } + + private void onLeaderChanged(NodeEvent response) { + if (response.getEventType() == NodeEvent.EventType.NODE_PD_LEADER_CHANGE) { + reconnect(); + } + } +} \ No newline at end of file diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java new file mode 100644 index 0000000000..0f1c202b6b --- /dev/null +++ 
b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/ConnectionManagers.java @@ -0,0 +1,81 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.apache.commons.lang3.StringUtils; + +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.client.PulseClient; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2024/1/31 + **/ +@Slf4j +public class ConnectionManagers { + + private static final ConnectionManagers INSTANCE = new ConnectionManagers(); + private static ConcurrentMap cms = new ConcurrentHashMap<>(); + + public static ConnectionManagers getInstance() { + return INSTANCE; + } + + public synchronized ConnectionManager add(PDConfig config) { + String pds = config.getServerHost(); + String[] hosts = pds.split(","); + ConnectionManager manager = null; + if (hosts.length > 0 && !StringUtils.isEmpty(hosts[0])) { + manager = cms.get(hosts[0]); + if (manager == null) { + manager = new ConnectionManager(config); + cms.put(hosts[0], manager); + PulseClient pulseClient = new PulseClient(config); + ConnectionClient connectionClient = new ConnectionClient(config); + manager.init(pulseClient, connectionClient); + } + for (int i = 1; i < hosts.length; i++) { + cms.putIfAbsent(hosts[i], manager); + } + } + return manager; + } + + public ConnectionManager get(String host) { + return cms.get(host); + } + + public ConnectionManager get(PDConfig config) { + String pds = config.getServerHost(); + String[] hosts = pds.split(","); + for (String host : hosts) { + ConnectionManager manager = cms.get(host); + if (manager != null) { + return manager; + } + } + return null; + } + + public void reset(String leader) { + ConnectionManager manager = cms.get(leader); + if (manager == null) { + return; + } + manager.reconnect(leader, false); + } + + public void resetPeers(String[] peers) { + for (String peer : peers) { + 
ConnectionManager manager = cms.get(peer); + if (manager == null) { + continue; + } + manager.updatePeers(peers); + break; + } + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java new file mode 100644 index 0000000000..5192134e52 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/InvokeProxy.java @@ -0,0 +1,39 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hugegraph.pd.grpc.PDGrpc; + +import lombok.Getter; +import lombok.Setter; + +/** + * @author zhangyingjie + * @date 2024/1/31 + **/ +public class InvokeProxy { + + @Getter + @Setter + private volatile PDGrpc.PDBlockingStub stub; + @Getter + @Setter + private String leader; + @Getter + private List hosts; + + public InvokeProxy(String[] switcher) { + updateHosts(switcher); + } + + private void updateHosts(String[] switcher) { + List l = new ArrayList<>(switcher.length); + for (String host : switcher) { + if (!host.isEmpty()) { + l.add(host); + } + } + this.hosts = l; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java new file mode 100644 index 0000000000..0adef093b2 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/Invoker.java @@ -0,0 +1,84 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Predicate; + +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; + +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.MethodDescriptor; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import 
io.grpc.stub.StreamObserver; + +public abstract class Invoker { + protected ConnectionManager cm; + protected Function asCreator; + protected Function bsCreator; + protected AbstractStub asyncStub; + protected AbstractBlockingStub blockingStub; + protected PDConfig config; + + public Invoker(ConnectionManager cm, Function asCreator, + Function bsCreator) { + this.cm = cm; + this.config = this.cm.getConfig(); + this.asCreator = asCreator; + this.bsCreator = bsCreator; + } + + public abstract RespT blockingCall(MethodDescriptor paramMethodDescriptor, + ReqT paramReqT) throws PDException; + + public abstract RespT blockingCall(MethodDescriptor paramMethodDescriptor, + ReqT paramReqT, long paramLong) throws PDException; + + public RespT blockingCall(MethodDescriptor method, ReqT req, + Predicate predicate) throws PDException { + HgAssert.isArgumentNotNull(predicate, "The predicate can't be null"); + RespT respT = blockingCall(method, req); + if (predicate.test(respT)) { + return respT; + } + return null; + } + + public abstract void serverStreamingCall( + MethodDescriptor methodDescriptor, ReqT paramReqT, + StreamObserver paramStreamObserver) throws PDException; + + public abstract StreamObserver streamingCall( + MethodDescriptor paramMethodDescriptor, + StreamObserver paramStreamObserver) throws PDException; + + protected CallOptions getBlockingCallOptions() { + return getBlockingCallOptions(this.cm.getDefaultDeadline()); + } + + protected CallOptions getBlockingCallOptions(long duration) { + if (this.blockingStub == null) { + this.blockingStub = this.cm.createBlockingStub(this.bsCreator); + } + return this.blockingStub.getCallOptions() + .withDeadlineAfter(duration, TimeUnit.MILLISECONDS); + } + + protected CallOptions getStreamingCallOptions() { + if (this.asyncStub == null) { + this.asyncStub = this.cm.createAsyncStub(this.asCreator); + } + return this.asyncStub.getCallOptions(); + } + + protected Channel getChannel() { + return this.cm.getLeaderChannel(); + } + + 
public void reconnect() { + this.cm.reconnect(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java new file mode 100644 index 0000000000..6d4ed6d8b4 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/rpc/LeaderInvoker.java @@ -0,0 +1,110 @@ +package org.apache.hugegraph.pd.client.rpc; + +import java.util.function.Function; + +import org.apache.hugegraph.pd.client.interceptor.Authentication; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.common.ErrorType; + +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.stub.AbstractBlockingStub; +import io.grpc.stub.AbstractStub; +import io.grpc.stub.ClientCalls; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class LeaderInvoker extends Invoker { + + private static final int MAX_RETRY = 10; + private Authentication auth = new Authentication(this.config.getUserName(), this.config.getAuthority()); + + public LeaderInvoker(ConnectionManager cm, Function asCreator, + Function bsCreator) { + super(cm, asCreator, bsCreator); + } + + public RespT blockingCall(MethodDescriptor method, ReqT req) throws + PDException { + long deadline = this.cm.getDefaultDeadline(); + return blockingCall(method, req, deadline); + } + + public RespT blockingCall(MethodDescriptor method, ReqT req, + long timeout) throws PDException { + return recurCall(channel -> { + ClientCall call = + this.auth.interceptCall(method, getBlockingCallOptions(timeout), channel); + return ClientCalls.blockingUnaryCall(call, req); + }, method.getFullMethodName(), req, 0); + } + + public void serverStreamingCall(MethodDescriptor methodDescriptor, ReqT request, + 
StreamObserver observer) throws PDException { + recurCall(channel -> { + ClientCall call = + this.auth.interceptCall(methodDescriptor, getStreamingCallOptions(), channel); + ClientCalls.asyncServerStreamingCall(call, request, observer); + return Boolean.valueOf(true); + }, methodDescriptor.getFullMethodName(), request, 0); + } + + public StreamObserver streamingCall(MethodDescriptor method, + StreamObserver observer) throws + PDException { + return recurCall(channel -> { + ClientCall call = + this.auth.interceptCall(method, getStreamingCallOptions(), channel); + return ClientCalls.asyncBidiStreamingCall(call, observer); + }, method.getFullMethodName(), observer, 0); + } + + private S recurCall(Function call, String methodName, Q req, int retry) throws + PDException { + S t; + Channel channel = getChannel(); + try { + t = call.apply(channel); + } catch (StatusRuntimeException | PDRuntimeException e) { + if (e instanceof StatusRuntimeException){ + if (((StatusRuntimeException) e).getStatus().getCode().equals(Status.Code.UNAUTHENTICATED)){ + throw new PDException(ErrorType.PD_UNAUTHENTICATED, e); + } + } + if (retry == MAX_RETRY) { + String s = req.toString(); + log.error("Failed to call [{}] in [{}] times, req: {}, caused by:", methodName, MAX_RETRY, s, + e); + throw new PDException(ErrorType.PD_UNAVAILABLE, e); + } + if (retry > 1 && Channels.canNotWork(e)) { + Status status = ((StatusRuntimeException) e).getStatus(); + if (status.getCode() == Status.Code.UNAVAILABLE) { + cm.reconnect(true); + } else { + cm.reconnect(); + } + } + try { + log.info("Retrying to call [{}] after [{}] seconds.", methodName, retry); + synchronized (e) { + e.wait(retry * 1000L + 100L); + } + } catch (Exception ex) { + log.error("Failed to sleep, caused by:", ex); + throw new PDException(ErrorType.ERROR, ex); + } + return recurCall(call, methodName, req, retry + 1); + } catch (Throwable e) { + log.error("Failed to call [{}] without retrying, req: {}, caused by:", methodName, + 
req.toString(), e); + throw new PDException(ErrorType.ERROR, e); + } + return t; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/support/PDExecutors.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/support/PDExecutors.java new file mode 100644 index 0000000000..9c555ada6a --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/support/PDExecutors.java @@ -0,0 +1,249 @@ +package org.apache.hugegraph.pd.client.support; + +import org.apache.hugegraph.pd.common.HgAssert; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import lombok.extern.slf4j.Slf4j; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionHandler; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; +import java.util.function.Supplier; + +/** + * @author lynn.bond@hotmail.com on 2023/12/6 + */ +@Slf4j +public final class PDExecutors { + private static final String POOL_PREFIX_NAME = "hg-pd-c-"; + private static final RejectedExecutionHandler CALLER_RUNS = new ThreadPoolExecutor.CallerRunsPolicy(); + private static final RejectedExecutionHandler ABORT_HANDLER = new ThreadPoolExecutor.AbortPolicy(); + private static final RejectedExecutionHandler DISCARD_HANDLER = new ThreadPoolExecutor.DiscardPolicy(); + private static final ExecutorService COMMON_POOL = newCachePool("common", 10, 1024); + + private PDExecutors() { + } + + /** + * Set up a thread pool to handle tasks fetched from an unlimited(Integer.MAX_VALUE) queue. 
+ * If the queue is full, the task will be run by caller. + * + * @param poolName The name of the thread pool. + * @param maxThreads The maximum number of threads in the pool. + * @return + */ + public static ExecutorService newQueuingPool(String poolName, int maxThreads) { + return newQueuingPool(poolName, maxThreads, Integer.MAX_VALUE); + } + + /** + * Set up a thread pool to handle tasks fetched from a specific size queue. + * If the queue is full, the task will be run by caller. + * + * @param poolName The name of the thread pool. + * @param maxThreads The maximum number of threads in the pool. + * @param queueSize The maximum number of tasks in the queue. + * @return + */ + public static ExecutorService newQueuingPool(String poolName, int maxThreads, int queueSize) { + HgAssert.isArgumentValid(poolName, "poolName"); + HgAssert.isFalse(maxThreads <= 0, "The number of maxThreads must be positive"); + HgAssert.isFalse(queueSize <= 0, "The number of queueSize must be positive"); + + return createExecutor(poolName, 0, maxThreads, queueSize, CALLER_RUNS); + } + + /** + * Set up a thread pool to handle tasks without queuing. The pool size increases when all threads are busy. + * If the pool size reaches maxThreads, the task will be run by caller. + * + * @param poolName The name of the thread pool. + * @param coreThreads The size of the core threads, which will not be destroyed. + * @param maxThreads The maximum number of threads in the pool. 
+ * @return + */ + public static ExecutorService newCachePool(String poolName, int coreThreads, int maxThreads) { + HgAssert.isArgumentValid(poolName, "poolName"); + HgAssert.isFalse(coreThreads < 0, "The number of coreThreads must be positive or zero"); + HgAssert.isFalse(maxThreads <= 0, "The number of maxThreads must be positive"); + + return createExecutor(poolName, coreThreads, maxThreads, 0, CALLER_RUNS); + } + + /** + * Set up a thread pool with a fixed size holding all the threads throughout whole life cycle + * without destroying any of them. + * All tasks submitted append to an unlimited(Integer.MAX_VALUE) queue firstly. + * If the queue is full, the task will be aborted. + * + * @param poolName The name of the thread pool. + * @param coreThreads The number of core threads in the pool. + * @return + */ + public static ExecutorService newFixedPool(String poolName, int coreThreads) { + HgAssert.isArgumentValid(poolName, "poolName"); + HgAssert.isFalse(coreThreads <= 0, "The number of threads must be positive"); + + return createExecutor(poolName, coreThreads, coreThreads, Integer.MAX_VALUE, ABORT_HANDLER); + } + + /** + * Create a thread pool with the specified name and discarding the task if the queue is full. + * + * @param poolName The name of the thread pool. + * @param maxThreads The maximum number of threads in the pool. + * @param queueSize The maximum number of tasks in the queue. + * @return + */ + public static ExecutorService newDiscardPool(String poolName, int maxThreads, int queueSize) { + return newDiscardPool(poolName, 0, maxThreads, queueSize); + } + + /** + * Create a thread pool with the specified name and discarding the task if the queue is full. + * + * @param poolName + * @param coreThreads The size of the core threads, which will not be destroyed. + * @param maxThreads The maximum number of threads in the pool. + * @param queueSize The maximum number of tasks in the queue. 
+ * @return + */ + public static ExecutorService newDiscardPool(String poolName, int coreThreads, int maxThreads, int queueSize) { + HgAssert.isArgumentValid(poolName, "poolName"); + HgAssert.isFalse(coreThreads < 0, "The number of threads positive or zero"); + HgAssert.isFalse(maxThreads <= 0, "The number of threads must be positive"); + HgAssert.isFalse(queueSize < 0, "The queue size must be positive or zero"); + + return createExecutor(poolName, coreThreads, maxThreads, queueSize, DISCARD_HANDLER); + } + + /** + * Invoking the `supplier` asynchronously and invoking the `handler` when it is completed. + * + * @param supplier A supplier of a synchronous procedure that returns a result. + * @param handler A consumer of the synchronous result. + * @param + */ + public static void asyncCallback(Supplier supplier, Consumer handler) { + HgAssert.isArgumentNotNull(supplier, "supplier"); + HgAssert.isArgumentNotNull(handler, "handler"); + + COMMON_POOL.execute(() -> { + handler.accept(supplier.get()); + }); + } + + /** + * Executing a task using a new thread. + * + * @param task + * @param taskName + */ + public static void newThreadTask(Runnable task, String taskName) { + HgAssert.isArgumentNotNull(task, "task"); + HgAssert.isArgumentValid(taskName, "task name"); + newTaskThread(taskName, task).start(); + } + + /** + * Attempting to execute a task without a result using the common pool. + * + * @param task + * @param taskName + * @param timeoutSec + */ + public static void awaitTask(Runnable task, String taskName, long timeoutSec) { + awaitTask(() -> { + task.run(); + return true; + }, taskName, timeoutSec); + } + + public static void awaitTask(ExecutorService executor, Runnable task, String taskName, long timeoutSec) { + awaitTask(executor, () -> { + task.run(); + return true; + }, taskName, timeoutSec); + } + + /** + * Attempting to execute a task using the common pool with a specified timeout (in seconds). 
+ */ + public static T awaitTask(Callable task, String taskName, long timeoutSec) { + return awaitTask(COMMON_POOL, task, taskName, timeoutSec); + } + + /** + * Attempting to execute a task with a specified timeout (in seconds). + */ + public static T awaitTask(ExecutorService executor, Callable task, String taskName, long timeoutSec) { + HgAssert.isArgumentNotNull(executor, "executor"); + HgAssert.isArgumentNotNull(task, "task"); + HgAssert.isArgumentValid(taskName, "task name"); + HgAssert.isFalse(timeoutSec < 0, "The timeout must be positive"); + + Future future = executor.submit(task); + T result = null; + + try { + result = future.get(timeoutSec, TimeUnit.SECONDS); + } catch (InterruptedException e) { + log.warn("Task [ {} ] interrupted. error:", taskName, e); + } catch (ExecutionException e) { + log.error("Task [ {} ] execution failed. Caused by: ", taskName, e); + } catch (TimeoutException e) { + log.warn("Task [ {} ] did not complete within the specified timeout: [ {} ] seconds.", taskName, timeoutSec); + future.cancel(true); + } + + return result; + } + + private T catchError(Supplier task) { + try { + return task.get(); + } catch (Exception e) { + log.warn("Caught a failure of a task, error message: {}", e.getMessage()); + } + + return null; + } + + private static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads, + int queueSize, RejectedExecutionHandler handler) { + BlockingQueue queue; + + if (queueSize <= 0) { + queue = new SynchronousQueue(); + } else { + queue = new LinkedBlockingQueue<>(queueSize); + } + + return new ThreadPoolExecutor(coreThreads, maxThreads, + 60L, TimeUnit.SECONDS, + queue, newFactory(name), handler); + } + + private static Thread newTaskThread(String taskName, Runnable task) { + Thread thread = new Thread(task, POOL_PREFIX_NAME + taskName.toLowerCase()); + thread.setDaemon(true); + return thread; + } + + private static ThreadFactory newFactory(String poolName) { + return new 
ThreadFactoryBuilder().setDaemon(true).setNameFormat(getPoolName(poolName)).build(); + } + + private static String getPoolName(String name) { + return POOL_PREFIX_NAME + name.toLowerCase() + "-%d"; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseListener.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseListener.java new file mode 100644 index 0000000000..a8535a8a42 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseListener.java @@ -0,0 +1,59 @@ +package org.apache.hugegraph.pd.pulse; + +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.function.Function; + +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.watch.WatchListener; + +public class DefaultPulseListener implements PulseListener { + private final List> watchListeners = new CopyOnWriteArrayList<>(); + + private final Function responseParser; + + public DefaultPulseListener(Function responseParser) { + this.responseParser = responseParser; + } + + public void addWatchListener(WatchListener listener) { + this.watchListeners.add(listener); + } + + public void removeWatchListener(WatchListener listener) { + this.watchListeners.remove(listener); + } + + private void raiseData(PulseResponse response) { + for (WatchListener listener : this.watchListeners) { + listener.onNext(this.responseParser.apply(response)); + } + } + + private void raiseError(Throwable throwable) { + for (WatchListener listener : this.watchListeners) { + listener.onError(throwable); + } + } + + private void raiseCompleted() { + for (WatchListener listener : this.watchListeners) { + listener.onCompleted(); + } + } + + public void onNext(PulseResponse response) { + } + + public void onNotice(PulseServerNotice notice) { + raiseData(notice.getContent()); + } + + public void onError(Throwable throwable) { + raiseError(throwable); + } + + public void onCompleted() 
{ + raiseCompleted(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseNotifier.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseNotifier.java new file mode 100644 index 0000000000..c618b21ddb --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/DefaultPulseNotifier.java @@ -0,0 +1,66 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.client.impl.StreamDelegatorSender; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; + +public class DefaultPulseNotifier implements PulseNotifier { + + private final PulseType pulseType; + private final StreamDelegatorSender sender; + + private long observerId; + + public DefaultPulseNotifier(PulseType pulseType, StreamDelegatorSender sender, long observerId) { + this.pulseType = pulseType; + this.sender = sender; + this.observerId = observerId; + this.sender.onReconnected(o -> start()); + } + + public void start() { + send(PulseRequest.newBuilder() + .setCreateRequest(PulseCreateRequest.newBuilder() + .setPulseType(this.pulseType) + .setObserverId(this.observerId) + )); + } + + public void ack(long noticeId, long observerId) { + send(PulseRequest.newBuilder() + .setAckRequest( + PulseAckRequest.newBuilder().setNoticeId(noticeId) + .setObserverId(observerId))); + } + + public void send(PulseRequest.Builder builder) { + this.sender.send(builder.build()); + } + + @Override + public void close() { + this.sender.close(); + } + + public void notifyServer(T request) { + this.sender.send(this.getRequest(request)); + } + + public void crash(String error) { + this.sender.error(error); + } + + private PulseRequest 
getRequest(T requestBuilder) { + PulseNoticeRequest.Builder builder = PulseNoticeRequest.newBuilder(); + if (requestBuilder instanceof PartitionHeartbeatRequest.Builder) { + builder.setPartitionHeartbeatRequest((PartitionHeartbeatRequest.Builder) requestBuilder); + } else { + throw new IllegalStateException("Unregistered request type: " + requestBuilder.getClass()); + } + return PulseRequest.newBuilder().setNoticeRequest(builder).build(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java new file mode 100644 index 0000000000..bce7c47381 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java @@ -0,0 +1,24 @@ +package org.apache.hugegraph.pd.pulse; + +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; + +/** + * @author lynn.bond@hotmail.com created on 2022/2/13 + */ +public class PartitionNotice extends PulseResponseNotice { + + public PartitionNotice(long noticeId, Consumer ackConsumer, PulseResponse content) { + super(noticeId,ackConsumer, content); + } + + @Override + public String toString() { + final StringBuffer sb = new StringBuffer("PartitionNotice{"); + sb.append("noticeId=").append(noticeId); + sb.append(", content=").append(content); + sb.append('}'); + return sb.toString(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/Pulse.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/Pulse.java new file mode 100644 index 0000000000..f20a9d411b --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/Pulse.java @@ -0,0 +1,67 @@ +package org.apache.hugegraph.pd.pulse; + +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; + +/** + * Bidirectional communication interface of 
pd-client and pd-server + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +public interface Pulse { + + /** + * + * @param listener + * @return + */ + PulseNotifier connect(PulseListener listener); + + /** + * 切换成新的host。做 channel/host的检查,如果需要关闭,notifier调用close方法。 + * + * @param host new host + * @param notifier notifier + * @return true if create new stub, otherwise false + */ + @Deprecated + boolean resetStub(String host, PulseNotifier notifier); + + /*** inner static methods ***/ + static PulseListener listener(Consumer onNext) { + return listener(onNext, t -> {}, () -> {}); + } + + static PulseListener listener(Consumer onNext, Consumer onError) { + return listener(onNext, onError, () -> {}); + } + + static PulseListener listener(Consumer onNext, Runnable onCompleted) { + return listener(onNext, t -> {}, onCompleted); + } + + static PulseListener listener(Consumer onNext, Consumer onError, Runnable onCompleted) { + return new PulseListener() { + @Override + public void onNext(T response) { + onNext.accept(response); + } + + @Override + public void onNotice(PulseServerNotice notice) { + + } + + @Override + public void onError(Throwable throwable) { + onError.accept(throwable); + } + + @Override + public void onCompleted() { + onCompleted.run(); + } + }; + } +} \ No newline at end of file diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java new file mode 100644 index 0000000000..d760157dfc --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java @@ -0,0 +1,33 @@ +package org.apache.hugegraph.pd.pulse; + + +public interface PulseListener { + /** + * Invoked on new events. + * + * @param response the response. + */ + @Deprecated + default void onNext(T response){}; + + /** + * Invoked on new events. 
+ * @param notice a wrapper of response + */ + default void onNotice(PulseServerNotice notice){ + notice.ack(); + } + + /** + * Invoked on errors. + * + * @param throwable the error. + */ + void onError(Throwable throwable); + + /** + * Invoked on completion. + */ + void onCompleted(); + +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotifier.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotifier.java new file mode 100644 index 0000000000..163bb40b9b --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotifier.java @@ -0,0 +1,11 @@ +package org.apache.hugegraph.pd.pulse; + + +import java.io.Closeable; + +public interface PulseNotifier extends Closeable { + + void notifyServer(T paramT); + + void crash(String paramString); +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseResponseNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseResponseNotice.java new file mode 100644 index 0000000000..f78130ef40 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseResponseNotice.java @@ -0,0 +1,44 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; + +import java.util.function.Consumer; + +/** + * @author lynn.bond@hotmail.com created on 2023/12/07 + */ +public class PulseResponseNotice implements PulseServerNotice{ + protected final long noticeId; + protected final Consumer ackConsumer; + protected final PulseResponse content; + + public PulseResponseNotice(long noticeId, Consumer ackConsumer, PulseResponse content) { + this.noticeId = noticeId; + this.ackConsumer = ackConsumer; + this.content = content; + } + + @Override + public void ack() { + this.ackConsumer.accept(this.noticeId); + } + + @Override + public long getNoticeId() { + return this.noticeId; + } + + @Override + public PulseResponse getContent() { + return this.content; + } + + @Override + public String 
toString() { + final StringBuffer sb = new StringBuffer("PulseNotice{"); + sb.append("noticeId=").append(noticeId); + sb.append(", content=").append(content); + sb.append('}'); + return sb.toString(); + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java new file mode 100644 index 0000000000..03d298f4c8 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.pulse; + +/** + * @author lynn.bond@hotmail.com created on 2022/2/13 + */ +public interface PulseServerNotice { + /** + * @throws RuntimeException when failed to send ack-message to pd-server + */ + void ack(); + + long getNoticeId(); + + /** + * Return a response object of gRPC stream. + * @return + */ + T getContent(); + +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java new file mode 100644 index 0000000000..0379a74954 --- /dev/null +++ b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java @@ -0,0 +1,105 @@ +package org.apache.hugegraph.pd.watch; + +import org.apache.hugegraph.pd.grpc.pulse.PulseNodeResponse; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; + +import java.util.Objects; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ +public class NodeEvent { + private String graph; + private long nodeId; + private EventType eventType; + + public static NodeEvent of(PulseNodeResponse res){ + if (res == null){ + throw new IllegalArgumentException("PulseNodeResponse can not be null"); + } + return new NodeEvent(res.getGraph(), res.getNodeId(), + NodeEvent.EventType.grpcTypeOf(res.getNodeEventType())); + } + + public NodeEvent(String graph, long nodeId, EventType eventType) { 
+ this.graph=graph; + this.nodeId=nodeId; + this.eventType=eventType; + } + + public String getGraph() { + return graph; + } + + public long getNodeId() { + return nodeId; + } + + public EventType getEventType() { + return eventType; + } + + public enum EventType { + UNKNOWN, + NODE_ONLINE, + NODE_OFFLINE, + NODE_RAFT_CHANGE, + NODE_PD_LEADER_CHANGE; + + public static EventType grpcTypeOf(NodeEventType grpcType) { + switch (grpcType) { + case NODE_EVENT_TYPE_NODE_ONLINE: + return NODE_ONLINE; + case NODE_EVENT_TYPE_NODE_OFFLINE: + return NODE_OFFLINE; + case NODE_EVENT_TYPE_NODE_RAFT_CHANGE: + return NODE_RAFT_CHANGE; + case NODE_EVENT_TYPE_PD_LEADER_CHANGE: + return NODE_PD_LEADER_CHANGE; + default: + return UNKNOWN; + } + + } + public static EventType grpcTypeOf(StoreNodeEventType grpcType) { + switch (grpcType) { + case STORE_NODE_EVENT_TYPE_NODE_ONLINE: + return NODE_ONLINE; + case STORE_NODE_EVENT_TYPE_NODE_OFFLINE: + return NODE_OFFLINE; + case STORE_NODE_EVENT_TYPE_NODE_RAFT_CHANGE: + return NODE_RAFT_CHANGE; + case STORE_NODE_EVENT_TYPE_PD_LEADER_CHANGE: + return NODE_PD_LEADER_CHANGE; + default: + return UNKNOWN; + } + + } + + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + NodeEvent nodeEvent = (NodeEvent) o; + return nodeId == nodeEvent.nodeId && Objects.equals(graph, + nodeEvent.graph) && eventType == nodeEvent.eventType; + } + + @Override + public int hashCode() { + return Objects.hash(graph, nodeId, eventType); + } + + @Override + public String toString() { + return "NodeEvent{" + + "graph='" + graph + '\'' + + ", nodeId=" + nodeId + + ", eventType=" + eventType + + '}'; + } +} diff --git a/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDEventRaiser.java b/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDEventRaiser.java new file mode 100644 index 0000000000..ccb5a1aca1 --- /dev/null +++ 
// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDEventRaiser.java ====

package org.apache.hugegraph.pd.watch;

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

import org.apache.hugegraph.pd.client.listener.PDEventListener;
import org.apache.hugegraph.pd.common.HgAssert;
import org.apache.hugegraph.pd.grpc.watch.WatchResponse;

import lombok.extern.slf4j.Slf4j;

/**
 * Subscribes to all PD watch channels (partition / graph / shard-group / node)
 * and fans each incoming event out to the registered {@link PDEventListener}s.
 *
 * <p>Listener registration is thread-safe: the list is a
 * {@link CopyOnWriteArrayList}, so events may be raised concurrently with
 * add/remove without extra locking.
 *
 * @author lynn.bond@hotmail.com on 2023/12/7
 */
@Slf4j
public class PDEventRaiser {

    // NOTE(review): generic argument restored — the pasted source had it stripped.
    private final List<PDEventListener> listeners = new CopyOnWriteArrayList<>();
    private final Watcher pdWatch;

    /**
     * Wires every watch channel of {@code pdWatch} to the matching raiser.
     *
     * <p>NOTE(review): method references to {@code this} escape the constructor;
     * safe only if the watcher does not deliver events before construction
     * completes — confirm against the Watcher implementation.
     */
    public PDEventRaiser(Watcher pdWatch) {
        this.pdWatch = pdWatch;
        this.pdWatch.watchPartition(this::raisePartitionEvent);
        this.pdWatch.watchGraph(this::raiseGraphEvent);
        this.pdWatch.watchShardGroup(this::raiseShardGroupEvent);
        this.pdWatch.watchNode(this::raiseNodeEvent);
    }

    /** Registers a listener; must not be null. */
    public void addListener(PDEventListener listener) {
        HgAssert.isArgumentNotNull(listener, "PDEventListener");
        this.listeners.add(listener);
    }

    /** Unregisters a listener; must not be null. */
    public void removeListener(PDEventListener listener) {
        HgAssert.isArgumentNotNull(listener, "PDEventListener");
        this.listeners.remove(listener);
    }

    void raisePartitionEvent(PartitionEvent response) {
        this.listeners.forEach(listener -> listener.onPartitionChanged(response));
    }

    void raiseGraphEvent(WatchResponse response) {
        this.listeners.forEach(listener -> listener.onGraphChanged(response));
    }

    void raiseShardGroupEvent(WatchResponse response) {
        this.listeners.forEach(listener -> listener.onShardGroupChanged(response));
    }

    void raiseNodeEvent(NodeEvent response) {
        log.info("PDClient receive store event {} {}",
                 response.getEventType(), Long.toHexString(response.getNodeId()));
        this.listeners.forEach(listener -> listener.onStoreChanged(response));
    }

}
// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatchPulseConverter.java ====

package org.apache.hugegraph.pd.watch;

import org.apache.hugegraph.pd.grpc.pulse.PulseChangeType;
import org.apache.hugegraph.pd.grpc.pulse.PulseGraphResponse;
import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
import org.apache.hugegraph.pd.grpc.pulse.PulseShardGroupResponse;
import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
import org.apache.hugegraph.pd.grpc.watch.WatchGraphResponse;
import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
import org.apache.hugegraph.pd.grpc.watch.WatchShardGroupResponse;
import org.apache.hugegraph.pd.grpc.watch.WatchType;

/**
 * Translates pulse-channel responses into the legacy watch-channel messages,
 * so pulse-based transports can serve the old {@code WatchResponse} API.
 *
 * @author lynn.bond@hotmail.com on 2023/11/8
 */
abstract class PDWatchPulseConverter {

    /**
     * Converts a pulse shard-group response into a watch shard-group response.
     * A null {@code pulse} yields a bare response carrying only the watch type.
     */
    static WatchResponse toWatchShardGroupResponse(PulseResponse pulse) {
        WatchResponse.Builder builder = WatchResponse.newBuilder()
                .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE);

        if (pulse == null) {
            return builder.build();
        }

        builder.setWatcherId(pulse.getObserverId())
               .setNoticeId(pulse.getNoticeId());

        // NOTE(review): generated protobuf getters normally return a default
        // instance rather than null; this defensive check is kept as-is.
        PulseShardGroupResponse origin = pulse.getShardGroupResponse();
        if (origin == null) {
            return builder.build();
        }

        builder.setShardGroupResponse(WatchShardGroupResponse.newBuilder()
                .setShardGroupId(origin.getShardGroupId())
                .setType(toWatchChangeType(origin.getType()))
                .setShardGroup(origin.getShardGroup())
                .build());

        return builder.build();
    }

    /**
     * Converts a pulse graph response into a watch graph response.
     * A null {@code pulse} yields a bare response carrying only the watch type.
     */
    static WatchResponse toWatchGraphResponse(PulseResponse pulse) {
        WatchResponse.Builder builder = WatchResponse.newBuilder()
                .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE);

        if (pulse == null) {
            return builder.build();
        }

        builder.setWatcherId(pulse.getObserverId())
               .setNoticeId(pulse.getNoticeId());

        PulseGraphResponse origin = pulse.getGraphResponse();
        if (origin == null) {
            return builder.build();
        }

        builder.setGraphResponse(WatchGraphResponse.newBuilder()
                .setGraph(origin.getGraph())
                .build());

        return builder.build();
    }

    /** Maps pulse change types onto watch change types; unmapped -> UNKNOWN. */
    private static WatchChangeType toWatchChangeType(PulseChangeType type) {
        switch (type) {
            case PULSE_CHANGE_TYPE_UNKNOWN:
                return WatchChangeType.WATCH_CHANGE_TYPE_UNKNOWN;
            case PULSE_CHANGE_TYPE_ADD:
                return WatchChangeType.WATCH_CHANGE_TYPE_ADD;
            case PULSE_CHANGE_TYPE_ALTER:
                return WatchChangeType.WATCH_CHANGE_TYPE_ALTER;
            case PULSE_CHANGE_TYPE_DEL:
                return WatchChangeType.WATCH_CHANGE_TYPE_DEL;
            case PULSE_CHANGE_TYPE_SPECIAL1:
                return WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1;
            default:
                return WatchChangeType.WATCH_CHANGE_TYPE_UNKNOWN;
        }
    }
}

// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java ====

package org.apache.hugegraph.pd.watch;

import java.util.Objects;

import org.apache.hugegraph.pd.grpc.pulse.PulseChangeType;
import org.apache.hugegraph.pd.grpc.pulse.PulsePartitionResponse;
import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;

/**
 * Immutable value describing a partition change (add / alter / delete)
 * observed on a graph.
 *
 * @author lynn.bond@hotmail.com created on 2021/11/4
 */
public class PartitionEvent {

    private final String graph;
    private final int partitionId;
    private final ChangeType changeType;

    /**
     * Builds an event from a gRPC pulse partition response.
     *
     * @throws IllegalArgumentException if {@code res} is null
     */
    public static PartitionEvent of(PulsePartitionResponse res) {
        if (res == null) {
            throw new IllegalArgumentException("res can not be null");
        }
        return new PartitionEvent(res.getGraph(), res.getPartitionId(),
                                  ChangeType.grpcTypeOf(res.getChangeType()));
    }

    public PartitionEvent(String graph, int partitionId, ChangeType changeType) {
        this.graph = graph;
        this.partitionId = partitionId;
        this.changeType = changeType;
    }

    public String getGraph() {
        return this.graph;
    }

    public int getPartitionId() {
        return this.partitionId;
    }

    public ChangeType getChangeType() {
        return this.changeType;
    }

    /** Client-side mirror of the two gRPC change-type enums. */
    public enum ChangeType {
        UNKNOWN,
        ADD,
        ALTER,
        DEL;

        /** Maps the watch-channel enum; unmapped values become {@link #UNKNOWN}. */
        public static ChangeType grpcTypeOf(WatchChangeType grpcType) {
            switch (grpcType) {
                case WATCH_CHANGE_TYPE_ADD:
                    return ADD;
                case WATCH_CHANGE_TYPE_ALTER:
                    return ALTER;
                case WATCH_CHANGE_TYPE_DEL:
                    return DEL;
                default:
                    return UNKNOWN;
            }
        }

        /** Maps the pulse-channel enum; unmapped values become {@link #UNKNOWN}. */
        public static ChangeType grpcTypeOf(PulseChangeType grpcType) {
            switch (grpcType) {
                case PULSE_CHANGE_TYPE_ADD:
                    return ADD;
                case PULSE_CHANGE_TYPE_ALTER:
                    return ALTER;
                case PULSE_CHANGE_TYPE_DEL:
                    return DEL;
                default:
                    return UNKNOWN;
            }
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        PartitionEvent that = (PartitionEvent) o;
        return this.partitionId == that.partitionId &&
               Objects.equals(this.graph, that.graph) &&
               this.changeType == that.changeType;
    }

    @Override
    public int hashCode() {
        return Objects.hash(this.graph, this.partitionId, this.changeType);
    }

    @Override
    public String toString() {
        return "PartitionEvent{" +
               "graph='" + graph + '\'' +
               ", partitionId=" + partitionId +
               ", changeType=" + changeType +
               '}';
    }
}

// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchListener.java ====

package org.apache.hugegraph.pd.watch;

/**
 * Callback for one watch channel.
 *
 * @param <T> the event type delivered on this channel
 *            (generic parameter restored — {@code onNext(T …)} proves it was stripped)
 */
public interface WatchListener<T> {

    /** Delivers the next event. */
    void onNext(T response);

    /** Reports a channel failure. */
    void onError(Throwable throwable);

    /** Signals the channel completed; no-op by default. */
    default void onCompleted() {
    }
}
// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java ====

package org.apache.hugegraph.pd.watch;

/**
 * Legacy numeric codes for watch channels; package-private.
 *
 * @author lynn.bond@hotmail.com created on 2021/11/4
 */
enum WatchType {

    PARTITION_CHANGE(10);

    // Wire value of the watch type. Not read anywhere in this chunk —
    // NOTE(review): presumably kept for protocol compatibility; confirm before removing.
    private final int value;

    WatchType(int value) {
        this.value = value;
    }

}

// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/Watcher.java ====

package org.apache.hugegraph.pd.watch;

import java.io.Closeable;
import java.util.function.Consumer;

import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
import org.apache.hugegraph.pd.grpc.watch.WatchResponse;

/**
 * Client API for watching PD changes: partitions, store nodes, graphs,
 * shard groups and PD peers. Each {@code watchXxx} call returns a
 * {@link Closeable} handle that cancels the subscription.
 *
 * <p>NOTE(review): all generic arguments below are restored from the stripped
 * paste; the channel event types are grounded in PDEventRaiser's method
 * references and WatcherImpl's parse methods.
 *
 * @author lynn.bond@hotmail.com created on 2021/11/4
 */
public interface Watcher {

    /** Adapts an onNext consumer into a listener; errors and completion are ignored. */
    static <T> WatchListener<T> listen(Consumer<T> onNext) {
        return listen(onNext, t -> {
        }, () -> {
        });
    }

    /** Adapts onNext/onError into a listener; completion is ignored. */
    static <T> WatchListener<T> listen(Consumer<T> onNext, Consumer<Throwable> onError) {
        return listen(onNext, onError, () -> {
        });
    }

    /** Adapts onNext/onCompleted into a listener; errors are ignored. */
    static <T> WatchListener<T> listen(Consumer<T> onNext, Runnable onCompleted) {
        return listen(onNext, t -> {
        }, onCompleted);
    }

    /** Adapts the three callbacks into a listener. */
    static <T> WatchListener<T> listen(final Consumer<T> onNext,
                                       final Consumer<Throwable> onError,
                                       final Runnable onCompleted) {
        return new WatchListener<T>() {
            @Override
            public void onNext(T response) {
                onNext.accept(response);
            }

            @Override
            public void onError(Throwable throwable) {
                onError.accept(throwable);
            }

            @Override
            public void onCompleted() {
                onCompleted.run();
            }
        };
    }

    default Closeable watchPartition(Consumer<PartitionEvent> consumer) {
        return watchPartition(listen(consumer));
    }

    Closeable watchPartition(WatchListener<PartitionEvent> listener);

    default Closeable watchNode(Consumer<NodeEvent> consumer) {
        return watchNode(listen(consumer));
    }

    Closeable watchNode(WatchListener<NodeEvent> listener);

    default Closeable watchGraph(Consumer<WatchResponse> consumer) {
        return watchGraph(listen(consumer));
    }

    Closeable watchGraph(WatchListener<WatchResponse> listener);

    default Closeable watchShardGroup(Consumer<WatchResponse> consumer) {
        return watchShardGroup(listen(consumer));
    }

    Closeable watchShardGroup(WatchListener<WatchResponse> listener);

    default Closeable watchPdPeers(Consumer<PulseResponse> consumer) {
        return watchPdPeers(listen(consumer));
    }

    Closeable watchPdPeers(WatchListener<PulseResponse> listener);

    @Deprecated
    String getCurrentHost();

    @Deprecated
    boolean checkChannel();
}

// ==== file: hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatcherImpl.java ====

package org.apache.hugegraph.pd.watch;

import java.io.Closeable;
import java.util.function.Function;

import org.apache.hugegraph.pd.client.PulseClient;
import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
import org.apache.hugegraph.pd.grpc.pulse.PulseType;
import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
import org.apache.hugegraph.pd.pulse.DefaultPulseListener;

import lombok.extern.slf4j.Slf4j;

/**
 * {@link Watcher} backed by the pulse transport: every watch channel is a
 * pulse subscription whose responses are converted to the channel's event type.
 *
 * @author lynn.bond@hotmail.com on 2023/12/7
 */
@Slf4j
public class WatcherImpl implements Watcher {

    private final PulseClient pulse;
    // PD-instruction events are delivered as raw PulseResponse; identity conversion.
    private final Function<PulseResponse, PulseResponse> converter = response -> response;

    /**
     * Opens one pulse connection per watch channel.
     * NOTE(review): DefaultPulseListener generic arguments restored from the
     * stripped paste — confirm against DefaultPulseListener's declaration.
     */
    public WatcherImpl(PulseClient pulse) {
        this.pulse = pulse;
        DefaultPulseListener<NodeEvent> nodeListener =
                new DefaultPulseListener<>(WatcherImpl::parseNodeEvent);
        DefaultPulseListener<PartitionEvent> partitionListener =
                new DefaultPulseListener<>(WatcherImpl::parsePartitionEvent);
        DefaultPulseListener<WatchResponse> graphListener =
                new DefaultPulseListener<>(WatcherImpl::parseGraphEvent);
        DefaultPulseListener<WatchResponse> shardGroupListener =
                new DefaultPulseListener<>(WatcherImpl::parseShardGroupEvent);
        DefaultPulseListener<PulseResponse> instructListener =
                new DefaultPulseListener<>(converter);
        this.pulse.connect(PulseType.PULSE_TYPE_STORE_NODE_CHANGE, nodeListener);
        this.pulse.connect(PulseType.PULSE_TYPE_PARTITION_CHANGE, partitionListener);
        this.pulse.connect(PulseType.PULSE_TYPE_GRAPH_CHANGE, graphListener);
        this.pulse.connect(PulseType.PULSE_TYPE_SHARD_GROUP_CHANGE, shardGroupListener);
        this.pulse.connect(PulseType.PULSE_TYPE_PD_INSTRUCTION, instructListener);
    }

    private static NodeEvent parseNodeEvent(PulseResponse response) {
        return NodeEvent.of(response.getNodeResponse());
    }

    private static PartitionEvent parsePartitionEvent(PulseResponse response) {
        return PartitionEvent.of(response.getPartitionResponse());
    }

    private static WatchResponse parseGraphEvent(PulseResponse response) {
        return PDWatchPulseConverter.toWatchGraphResponse(response);
    }

    private static WatchResponse parseShardGroupEvent(PulseResponse response) {
        return PDWatchPulseConverter.toWatchShardGroupResponse(response);
    }

    @Deprecated
    @Override
    public String getCurrentHost() {
        return this.pulse.getLeaderAddress();
    }

    @Deprecated
    @Override
    public boolean checkChannel() {
        return true;
    }

    /**
     * Registers {@code listener} on the pulse listener bound to {@code type};
     * closing the returned handle unregisters it.
     */
    @SuppressWarnings("unchecked") // cast restored from the stripped raw-typed paste
    public <T> Closeable watch(PulseType type, WatchListener<T> listener) {
        DefaultPulseListener<T> pulseListener =
                (DefaultPulseListener<T>) this.pulse.getListener(type);
        pulseListener.addWatchListener(listener);
        return () -> pulseListener.removeWatchListener(listener);
    }

    @Override
    public Closeable watchPartition(WatchListener<PartitionEvent> listener) {
        return watch(PulseType.PULSE_TYPE_PARTITION_CHANGE, listener);
    }

    @Override
    public Closeable watchNode(WatchListener<NodeEvent> listener) {
        return watch(PulseType.PULSE_TYPE_STORE_NODE_CHANGE, listener);
    }

    @Override
    public Closeable watchGraph(WatchListener<WatchResponse> listener) {
        return watch(PulseType.PULSE_TYPE_GRAPH_CHANGE, listener);
    }

    @Override
    public Closeable watchShardGroup(WatchListener<WatchResponse> listener) {
        return watch(PulseType.PULSE_TYPE_SHARD_GROUP_CHANGE, listener);
    }

    @Override
    public Closeable watchPdPeers(WatchListener<PulseResponse> listener) {
        return watch(PulseType.PULSE_TYPE_PD_INSTRUCTION, listener);
    }

}

// ==== file: hg-pd-client/src/test/java/org/apache/hugegraph/pd/PartitionCacheTest.java ====

package org.apache.hugegraph.pd;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hugegraph.pd.common.KVPair;
import org.apache.hugegraph.pd.common.PartitionCache;
import org.apache.hugegraph.pd.grpc.Metapb;

import com.google.common.collect.Range;
import com.google.common.collect.RangeMap;
import com.google.common.collect.TreeRangeMap;
// import org.junit.Test;

/**
 * Manual exercises for {@link PartitionCache} and its internal map shapes.
 * Tests are disabled (commented {@code @Test}) — they print rather than assert.
 */
public class PartitionCacheTest {

    // @Test
    public void test() {
        PartitionCache cache = new PartitionCache();
        // Ten partitions covering key codes [0, 100) in slices of 10.
        for (int i = 0; i < 10; i++) {
            // NOTE(review): KVPair type arguments restored from the stripped
            // paste — confirm the value type against PartitionCache's API.
            KVPair<Metapb.Partition, Metapb.Shard> partShards = new KVPair<>(
                    Metapb.Partition.newBuilder()
                                    .setStartKey(i * 10)
                                    .setEndKey((i + 1) * 10)
                                    .build(), null);
            cache.updatePartition("aa", i, partShards.getKey());
        }

        for (int i = 0; i < 100; i++) {
            KVPair<Metapb.Partition, Metapb.Shard> partShards = cache.getPartitionByCode("aa", i);
            System.out.println(" " + i + " " + partShards.getKey().getStartKey());
        }
    }

    // @Test
    public void test1() {
        // key-code range -> partition id, per graph
        Map<String, RangeMap<Long, Integer>> keyToPartIdCache = new HashMap<>();
        // keyed by graphName + PartitionID (translated from the original Chinese comment);
        // TODO confirm value type — the paste stripped the generic arguments
        Map<String, Metapb.Partition> partitionCache = new HashMap<>();

        // caches all stores, used for whole-database queries; needs optimization (translated)
        Map<String, List<Metapb.Store>> allStoresCache = new HashMap<>();

        keyToPartIdCache.put("a", TreeRangeMap.create());

        keyToPartIdCache.get("a")
                        .put(Range.closedOpen(1L, 2L), 1);

        allStoresCache.put("a", new ArrayList<>());
        allStoresCache.get("a").add(Metapb.Store.newBuilder().setId(34).build());

        Map<String, RangeMap<Long, Integer>> keyToPartIdCache2 =
                cloneKeyToPartIdCache(keyToPartIdCache);
        System.out.println(keyToPartIdCache2.size());
    }

    /** Deep-copies the per-graph range maps (RangeMap has no copy constructor). */
    public Map<String, RangeMap<Long, Integer>> cloneKeyToPartIdCache(
            Map<String, RangeMap<Long, Integer>> cache) {
        Map<String, RangeMap<Long, Integer>> cacheClone = new HashMap<>();
        cache.forEach((k1, v1) -> {
            cacheClone.put(k1, TreeRangeMap.create());
            v1.asMapOfRanges().forEach((k2, v2) -> {
                cacheClone.get(k1).put(k2, v2);
            });
        });
        return cacheClone;
    }

    /** Shallow-copies the partition cache. */
    public Map<String, Metapb.Partition>
    clonePartitionCache(Map<String, Metapb.Partition> cache) {
        Map<String, Metapb.Partition> cacheClone = new HashMap<>();
        cacheClone.putAll(cache);
        return cacheClone;
    }

    /** Shallow-copies the store cache. */
    public Map<String, List<Metapb.Store>>
    cloneStoreCache(Map<String, List<Metapb.Store>> cache) {
        Map<String, List<Metapb.Store>> cacheClone = new HashMap<>();
        cacheClone.putAll(cache);
        return cacheClone;
    }
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.client; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.pulse.Pulse; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.pulse.PulseNotifier; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java + +import java.nio.charset.StandardCharsets; +import java.util.List; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Ignore; +import org.junit.Test; + +public class StoreRegisterTest { + + private static PDClient pdClient; +======== +public class StoreRegisterTest { + private static PDClient pdClient; + private static PDConfig config; + private long storeId = 0; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java + private final String storeAddr = "localhost"; + private final String 
graphName = "default/hugegraph/g"; + private long storeId = 0; + + @BeforeClass +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java + public static void beforeClass() { + PDConfig config = PDConfig.of("localhost:8686"); +======== + public static void beforeClass() throws Exception { + config = PDConfig.of("localhost:8686"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java + config.setEnableCache(true); + pdClient = PDClient.create(config); + } + + @Test + public void testRegisterStore() throws PDException { + Metapb.Store store = Metapb.Store.newBuilder().setAddress(storeAddr).build(); + storeId = pdClient.registerStore(store); + Assert.assertTrue("RegisterStore store_id = " + storeId, storeId != 0); + } + + @Test + public void testGetStore() throws PDException { + testRegisterStore(); + Metapb.Store store = pdClient.getStore(storeId); + Assert.assertEquals(storeAddr, store.getAddress()); + System.out.println(store); + } + + @Ignore // no active store + @Test + public void testGetActiveStores() throws PDException { + testRegisterStore(); + List stores = pdClient.getActiveStores(graphName); + stores.forEach((e) -> { + System.out.println("-------------------------------------"); + System.out.println(e); + }); + } + + @Ignore // no active store + @Test + public void testStoreHeartbeat() throws PDException { + testRegisterStore(); + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder().setStoreId(storeId).build(); + pdClient.storeHeartbeat(stats); + List stores = pdClient.getActiveStores(graphName); + boolean exist = false; + for (Metapb.Store store : stores) { + if (store.getId() == storeId) { + exist = true; + break; + } + } + Assert.assertTrue(exist); + } + + @Ignore // no active store + @Test + public void testPartitionHeartbeat() throws PDException { + testRegisterStore(); +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java + PDPulse pdPulse = new PDPulseImpl(pdClient.getLeaderIp()); + + PDPulse.Notifier notifier = pdPulse.connectPartition( + new PDPulse.Listener<>() { +======== + Pulse pdPulse = pdClient.getPulse(); + + PulseNotifier notifier = pdPulse.connect( + new PulseListener() { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java + + @Override + public void onNext(PulseResponse response) { + + } + + @Override + public void onNotice(PulseServerNotice notice) { + + } + + @Override + public void onError(Throwable throwable) { + + } + + @Override + public void onCompleted() { + + } + }); + KVPair partShard = + pdClient.getPartition("test", "1".getBytes(StandardCharsets.UTF_8)); + notifier.notifyServer(PartitionHeartbeatRequest.newBuilder().setStates( + Metapb.PartitionStats.newBuilder().addGraphName("test") + .setId(partShard.getKey().getId()) + .setLeader(Metapb.Shard.newBuilder().setStoreId(1).build()))); + } +} diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java new file mode 100644 index 0000000000..64903c6164 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/DiscoveryClientImplTest.java @@ -0,0 +1,136 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.junit.Assert; +// import org.junit.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.Vector; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author zhangyingjie + * @date 2021/12/21 + **/ +public class DiscoveryClientImplTest { + + String address = "localhost:80"; + int delay = 1000; + int wait = delay * 3 + 500; 
+ + // @Test + public void registerStore() throws InterruptedException { + + HashMap labels = new HashMap<>(); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.77:8520"); + labels.put("scheme","http"); + labels.put("__relabeling","http"); + labels.put("no_relabeling","http"); + getClient("store", "address1",labels); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.78:8520"); + labels.put("scheme","http"); + getClient("store", "address2",labels); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.79:8520"); + labels.put("scheme","http"); + getClient("store", "address3",labels); + + labels.put("metrics","/actuator/prometheus"); + labels.put("target","10.81.116.78:8620"); + labels.put("scheme","http"); + getClient("pd", "address1",labels); + + labels.put("metrics","/graph/metrics"); + labels.put("target","10.37.1.1:9200"); + labels.put("scheme","https"); + getClient("hugegraph", "address1",labels); + } + + // @Test + public void testNodes() throws InterruptedException { + String appName = "hugegraph"; + register(appName, address); + } + + // @Test + public void testMultiNode() throws InterruptedException { + for (int i = 0; i < 2; i++) { + register("app" + String.valueOf(i), address + i); + } + } + + // @Test + public void testParallelMultiNode() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(30); + Vector exceptions = new Vector<>(); + for (int i = 0; i < 30; i++) { + int finalI = i; + new Thread(() -> { + try { + for (int j = 0; j < 3; j++) { + register("app" + finalI, address + j); + } + } catch (Exception e) { + exceptions.add(e); + } finally { + latch.countDown(); + } + }).start(); + } + latch.await(); + Assert.assertTrue(exceptions.size() == 0); + } + private static AtomicLong label = new AtomicLong(); + private void register(String appName, String address) throws InterruptedException { + + HashMap labels = new HashMap<>(); + String 
labelValue = String.valueOf(label.incrementAndGet()); + labels.put("address",labelValue); + labels.put("address1",labelValue); + Query query = Query.newBuilder().setAppName( + appName).setVersion("0.13.0").putAllLabels(labels).build(); + DiscoveryClientImpl discoveryClient = getClient(appName, address, labels); + Thread.sleep(10000); + NodeInfos nodeInfos1 = discoveryClient.getNodeInfos(query); + Assert.assertTrue(nodeInfos1.getInfoCount() == 1); + DiscoveryClientImpl discoveryClient1 = getClient(appName, address + 0,labels); + Thread.sleep(10000); + Assert.assertTrue( + discoveryClient.getNodeInfos(query).getInfoCount() == 2); + Query query1 = Query.newBuilder().setAppName( + appName).setVersion("0.12.0").putAllLabels(labels).build(); + Assert.assertTrue( + discoveryClient.getNodeInfos(query1).getInfoCount() == 0); + discoveryClient.cancelTask(); + discoveryClient1.cancelTask(); + Thread.sleep(wait); + NodeInfos nodeInfos = discoveryClient.getNodeInfos(query); + System.out.println(nodeInfos); + Assert.assertTrue(nodeInfos.getInfoCount() == 0); + discoveryClient.close(); + discoveryClient1.close(); + } + + private DiscoveryClientImpl getClient(String appName, String address,Map labels) { + DiscoveryClientImpl discoveryClient = null; + try{ + discoveryClient = DiscoveryClientImpl.newBuilder().setCenterAddress( + "localhost:8687,localhost:8686,localhost:8688").setAddress(address).setAppName( + appName).setDelay(delay).setVersion("0.13.0").setId( + "0").setLabels(labels).build(); + discoveryClient.scheduleTask(); + } catch(Exception e){ + e.printStackTrace(); + } + + return discoveryClient; + } +} \ No newline at end of file diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java new file mode 100644 index 0000000000..eed8f7c087 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/LicenseClientImplTest.java @@ -0,0 
+1,114 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.kv.KResponse; +import org.apache.hugegraph.pd.grpc.kv.KvResponse; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +// import org.junit.Test; +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.util.Iterator; +import java.util.Map; +import java.util.Properties; + +/** + * @author zhangyingjie + * @date 2021/12/21 + **/ +@Slf4j +public class LicenseClientImplTest { + + // @Test + public void putLicense() { + PDConfig pdConfig = PDConfig.of("localhost:8686,localhost:8687,localhost:8688"); + //PDConfig pdConfig = PDConfig.of("localhost:8686"); + pdConfig.setEnableCache(true); + try (LicenseClient c = new LicenseClient(pdConfig)) { + File file = new File("../conf/hugegraph.license"); + byte[] bytes = FileUtils.readFileToByteArray(file); + Pdpb.PutLicenseResponse putLicenseResponse = c.putLicense(bytes); + Errors error = putLicenseResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(ErrorType.OK); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + + // @Test + public void getKv() { + PDConfig pdConfig = PDConfig.of("10.157.12.36:8686"); + pdConfig.setEnableCache(true); + try (KvClient c = new KvClient(pdConfig)) { + KResponse kResponse = c.get("S:FS"); + Errors error = kResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(ErrorType.OK); + Properties ymlConfig = getYmlConfig(kResponse.getValue()); + Object property = ymlConfig.get("rocksdb.write_buffer_size"); + assert property.toString().equals("32000000"); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + // @Test + public void putKv() { + PDConfig pdConfig = 
PDConfig.of("10.14.139.70:8688"); + pdConfig.setEnableCache(true); + try (KvClient c = new KvClient(pdConfig)) { + long l = System.currentTimeMillis(); + KvResponse kvResponse = c.put("S:Timestamp", String.valueOf(l)); + Errors error = kvResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(ErrorType.OK); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + // @Test + public void putKvLocal() { + PDConfig pdConfig = PDConfig.of("localhost:8686"); + pdConfig.setEnableCache(true); + try (KvClient c = new KvClient(pdConfig)) { + long l = System.currentTimeMillis(); + KvResponse kvResponse = c.put("S:Timestamp", String.valueOf(l)); + Errors error = kvResponse.getHeader().getError(); + log.info(error.getMessage()); + assert error.getType().equals(ErrorType.OK); + } catch (Exception e) { + log.error("put license with error: {}", e); + } + } + + private Properties getYmlConfig(String yml) { + Yaml yaml = new Yaml(); + Iterable load = yaml.loadAll(yml); + Iterator iterator = load.iterator(); + Properties properties = new Properties(); + while (iterator.hasNext()) { + Map next = (Map) iterator.next(); + map2Properties(next, "", properties); + } + return properties; + } + + private void map2Properties(Map map, String prefix, Properties properties) { + + for (Map.Entry entry : map.entrySet()) { + String key = entry.getKey(); + String newPrefix = prefix == null || prefix.length() == 0 ? key : prefix + "." 
+ key; + Object value = entry.getValue(); + if (!(value instanceof Map)) { + properties.put(newPrefix, value); + } else { + map2Properties((Map) value, newPrefix, properties); + } + + } + } + +} \ No newline at end of file diff --git a/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java new file mode 100644 index 0000000000..f82535e210 --- /dev/null +++ b/hg-pd-client/src/test/java/org/apache/hugegraph/pd/client/test/HgPDTestUtil.java @@ -0,0 +1,78 @@ +package org.apache.hugegraph.pd.client.test; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.List; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +public class HgPDTestUtil { + + public static void println(Object str) { + System.out.println(str); + } + + public static String toStr(byte[] b) { + if (b == null) return ""; + if (b.length == 0) return ""; + return new String(b, StandardCharsets.UTF_8); + } + + public static byte[] toBytes(String str) { + if (str == null) return null; + return str.getBytes(StandardCharsets.UTF_8); + } + + public static byte[] toBytes(long l) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(l); + return buffer.array(); + } + + private static byte[] toBytes(final int i) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.putInt(i); + return buffer.array(); + } + + public static long toLong(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getLong(); + } + + public static long toInt(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getInt(); + } + + public static String padLeftZeros(String str, int n) { + return String.format("%1$" + n + "s", str).replace(' ', '0'); + } + + 
public static String toSuffix(int num, int length) { + return "-" + padLeftZeros(String.valueOf(num), length); + } + + public static int amountOf(List list) { + if (list == null) { + return 0; + } + return list.size(); + } + + public static int amountOf(Iterator iterator) { + if (iterator == null) return 0; + int count = 0; + while (iterator.hasNext()) { + iterator.next(); + count++; + } + return count; + } +} diff --git a/hg-pd-client/src/test/resources/log4j2.xml b/hg-pd-client/src/test/resources/log4j2.xml new file mode 100644 index 0000000000..ec861f17a9 --- /dev/null +++ b/hg-pd-client/src/test/resources/log4j2.xml @@ -0,0 +1,85 @@ + + + + + + logs + hg-store-client + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hg-pd-common/pom.xml b/hg-pd-common/pom.xml new file mode 100644 index 0000000000..7ba70e43c0 --- /dev/null +++ b/hg-pd-common/pom.xml @@ -0,0 +1,69 @@ + + + + 4.0.0 + + org.apache.hugegraph + hugegraph-pd + ${revision} + + hg-pd-common + + + 11 + 11 + + + + + org.apache.hugegraph + hg-pd-grpc + ${project.version} + + + org.projectlombok + lombok + ${lombok.version} + + + org.apache.commons + commons-collections4 + 4.4 + + + org.apache.logging.log4j + log4j-slf4j-impl + ${log4j2.version} + + + com.google.protobuf + protobuf-java-util + 3.17.2 + + + com.google.errorprone + error_prone_annotations + + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + attach-sources + + jar + + + + + + + + \ No newline at end of file diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Cache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Cache.java new file mode 100644 index 0000000000..3938cbe645 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Cache.java @@ -0,0 +1,95 @@ +package org.apache.hugegraph.pd.common; + +import java.io.Closeable; +import java.io.IOException; +import 
java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; + +/** + * @author zhangyingjie + * @date 2023/4/24 + **/ +public class Cache implements Closeable { + + ScheduledExecutorService ex = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("hg-cache")); + private ConcurrentMap map = new ConcurrentHashMap(); + private ScheduledFuture future; + private Runnable checker = () -> { + for (Map.Entry e : map.entrySet()) { + if (e.getValue().getValue() == null) { + map.remove(e.getKey()); + } + } + }; + + public Cache() { + future = ex.scheduleWithFixedDelay(checker, 1, 1, TimeUnit.SECONDS); + } + + public CacheValue put(String key, T value, long ttl) { + return map.put(key, new CacheValue(value, ttl)); + } + + public T get(String key) { + CacheValue value = map.get(key); + if (value == null) { + return null; + } + T t = value.getValue(); + if (t == null) { + map.remove(key); + } + return t; + } + + public boolean keepAlive(String key, long ttl) { + CacheValue value = map.get(key); + if (value == null) { + return false; + } + value.keepAlive(ttl); + return true; + } + + @Override + public void close() throws IOException { + try { + future.cancel(true); + ex.shutdownNow(); + } catch (Exception e) { + try { + ex.shutdownNow(); + } catch (Exception ex) { + + } + } + } + + private class CacheValue { + + private final T value; + long outTime; + + protected CacheValue(T value, long ttl) { + this.value = value; + this.outTime = System.currentTimeMillis() + ttl; + } + + protected T getValue() { + if (System.currentTimeMillis() >= outTime) { + return null; + } + return value; + } + + protected void keepAlive(long ttl) { + this.outTime = System.currentTimeMillis() + ttl; + } + + } +} diff --git 
a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Consts.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Consts.java new file mode 100644 index 0000000000..2a6ee9870e --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/Consts.java @@ -0,0 +1,23 @@ +package org.apache.hugegraph.pd.common; + +import io.grpc.Metadata; + +/** + * @author zhangyingjie + * @date 2023/4/25 + **/ +public class Consts { + public static final Metadata.Key CREDENTIAL_KEY = Metadata.Key.of("credential", + Metadata.ASCII_STRING_MARSHALLER); + public static final Metadata.Key TOKEN_KEY = Metadata.Key.of("Pd-Token", + Metadata.ASCII_STRING_MARSHALLER); + + public static final Metadata.Key LEADER_KEY = Metadata.Key.of("leader", + Metadata.ASCII_STRING_MARSHALLER); + + public static final int DEFAULT_STORE_GROUP_ID = 0; + /** + * store group 的partition间隔 + */ + public static final int PARTITION_GAP = 1000; +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/DefaultThreadFactory.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/DefaultThreadFactory.java new file mode 100644 index 0000000000..baaa070301 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/DefaultThreadFactory.java @@ -0,0 +1,26 @@ +package org.apache.hugegraph.pd.common; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author zhangyingjie + * @date 2023/4/25 + **/ +public class DefaultThreadFactory implements ThreadFactory { + + private final AtomicInteger number = new AtomicInteger(1); + private final String prefix; + + public DefaultThreadFactory(String prefix) { + this.prefix = prefix + "-"; + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(r, prefix + number.getAndIncrement()); + t.setDaemon(true); + t.setPriority(Thread.NORM_PRIORITY); + return t; + } +} diff --git 
a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java new file mode 100644 index 0000000000..2d31844ff6 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java @@ -0,0 +1,152 @@ +package org.apache.hugegraph.pd.common; + +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; + +import org.apache.commons.collections4.CollectionUtils; + +import org.apache.hugegraph.pd.grpc.Metapb.Graph; +import org.apache.hugegraph.pd.grpc.Metapb.Partition; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2023/6/6 + **/ +@Slf4j +@Data +public class GraphCache { + + private Graph graph; + private AtomicBoolean initialized = new AtomicBoolean(false); + private AtomicBoolean writing = new AtomicBoolean(false); + private ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + private Map state = new ConcurrentHashMap<>(); + private Map partitions = new ConcurrentHashMap<>(); + private volatile RangeMap range = TreeRangeMap.create(); + + public GraphCache(Graph graph) { + this.graph = graph; + } + + public void init(List ps) { + Map gps = new ConcurrentHashMap<>(ps.size(), 1); + if (!CollectionUtils.isEmpty(ps)) { + WriteLock lock = getLock().writeLock(); + try { + lock.lock(); + for (Partition p : ps) { + gps.put(p.getId(), p); + range.put(Range.closedOpen(p.getStartKey(), p.getEndKey()), p.getId()); + } + } catch (Exception e) { + log.warn("init graph with error:", e); + } finally { + lock.unlock(); + } + } + 
setPartitions(gps); + + } + + public Partition getPartition(Integer id) { + return partitions.get(id); + } + + public Partition addPartition(Integer id, Partition p) { + return partitions.put(id, p); + } + + public Partition removePartition(Integer id) { + Partition p = partitions.get(id); + if (p != null) { + RangeMap range = getRange(); + if (Objects.equals(p.getId(), range.get(p.getStartKey())) && + Objects.equals(p.getId(), range.get(p.getEndKey() - 1))) { + WriteLock lock = getLock().writeLock(); + lock.lock(); + try { + range.remove(range.getEntry(p.getStartKey()).getKey()); + } catch (Exception e) { + log.warn("remove partition with error:", e); + } finally { + lock.unlock(); + } + } + } + return partitions.remove(id); + } + + public void removePartitions() { + getState().clear(); + RangeMap range = getRange(); + WriteLock lock = getLock().writeLock(); + try { + lock.lock(); + if (range != null) { + range.clear(); + } + } catch (Exception e) { + log.warn("remove partition with error:", e); + } finally { + lock.unlock(); + } + getPartitions().clear(); + getInitialized().set(false); + } + + + /* + * 需要外部加写锁 + * */ + public void reset() { + partitions.clear(); + try { + range.clear(); + } catch (Exception e) { + + } + } + + public boolean updatePartition(Partition partition) { + int partId = partition.getId(); + Partition p = getPartition(partId); + if (p != null && p.equals(partition)) { + return false; + } + WriteLock lock = getLock().writeLock(); + try { + lock.lock(); + RangeMap range = getRange(); + addPartition(partId, partition); + try { + if (p != null) { + // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) + // 当确认老的 start, end 都是自己的时候,才可以删除老的. 
(即还没覆盖) + if (Objects.equals(partId, range.get(partition.getStartKey())) && + Objects.equals(partId, range.get(partition.getEndKey() - 1))) { + range.remove(range.getEntry(partition.getStartKey()).getKey()); + } + } + range.put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); + } catch (Exception e) { + log.warn("update partition with error:", e); + } + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + lock.unlock(); + } + return true; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java new file mode 100644 index 0000000000..ab36d709bc --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java @@ -0,0 +1,130 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java +package org.apache.hugegraph.pd.common; + +import java.util.Collection; +import java.util.Map; + +public final class HgAssert { + + public static void isTrue(boolean expression, String message) { + if (message == null) { + throw new IllegalArgumentException("message is null"); + } + + if (!expression) { + throw new IllegalArgumentException(message); + } + } + + public static void assertTrue(boolean expression, String... message) { + if (!expression) { + throw new IllegalArgumentException(String.join("", message)); + } + } + + public static void isFalse(boolean expression, String message) { + isTrue(!expression, message); + } + + public static void assertFalse(boolean expression, String... message) { + assertTrue(!expression, message); + } + + public static void isArgumentValid(byte[] bytes, String parameter) { + assertFalse(isInvalid(bytes), "The argument is invalid: ", parameter); + } + + public static void isArgumentValid(String str, String parameter) { + assertFalse(isInvalid(str), "The argument is invalid: ", parameter); + } + + public static void isArgumentNotNull(Object obj, String parameter) { + assertTrue(obj != null, "The argument is null: ", parameter); + } + + public static void istValid(byte[] bytes, String msg) { + isFalse(isInvalid(bytes), msg); + } + + public static void isValid(String str, String msg) { + isFalse(isInvalid(str), msg); + } + + public static void isNotNull(Object obj, String msg) { + isTrue(obj != null, msg); + } + + public static boolean isContains(Object[] objs, Object obj) { + if (objs == null || objs.length == 0 || obj == null) { + return false; + } + for (Object item : objs) { + if (obj.equals(item)) { + return true; + } + } + return false; + } + + public static boolean isInvalid(String... 
strs) { + if (strs == null || strs.length == 0) { + return true; + } + for (String item : strs) { + if (item == null || "".equals(item.trim())) { + return true; + } + } + return false; + } + + public static boolean isInvalid(byte[] bytes) { + return bytes == null || bytes.length == 0; + } + + public static boolean isInvalid(Map map) { + return map == null || map.isEmpty(); + } + + public static boolean isInvalid(Collection list) { + return list == null || list.isEmpty(); + } + + public static boolean isContains(Collection list, T item) { + if (list == null || item == null) { + return false; + } + return list.contains(item); + } + + public static boolean isNull(Object... objs) { + if (objs == null) { + return true; + } + for (Object item : objs) { + if (item == null) { + return true; + } + } + return false; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java new file mode 100644 index 0000000000..771064086b --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java @@ -0,0 +1,135 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java +package org.apache.hugegraph.pd.common; + +import java.io.Serializable; +import java.util.Objects; + +public class KVPair implements Serializable { + + /** + * Key of this Pair. + */ + private K key; + /** + * Value of this this Pair. + */ + private V value; + + /** + * Creates a new pair + * + * @param key The key for this pair + * @param value The value to use for this pair + */ + public KVPair(K key, V value) { + this.key = key; + this.value = value; + } + + /** + * Gets the key for this pair. + * + * @return key for this pair + */ + public K getKey() { + return key; + } + + public void setKey(K key) { + this.key = key; + } + + /** + * Gets the value for this pair. + * + * @return value for this pair + */ + public V getValue() { + return value; + } + + public void setValue(V value) { + this.value = value; + } + + /** + *

String representation of this + * Pair.

+ * + *

The default name/value delimiter '=' is always used.

+ * + * @return String representation of this Pair + */ + @Override + public String toString() { + return key + "=" + value; + } + + /** + *

Generate a hash code for this Pair.

+ * + *

The hash code is calculated using both the name and + * the value of the Pair.

+ * + * @return hash code for this Pair + */ + @Override + public int hashCode() { + // name's hashCode is multiplied by an arbitrary prime number (13) + // in order to make sure there is a difference in the hashCode between + // these two parameters: + // name: a value: aa + // name: aa value: a + return key.hashCode() * 13 + (value == null ? 0 : value.hashCode()); + } + + /** + *

Test this Pair for equality with another + * Object.

+ * + *

If the Object to be tested is not a + * Pair or is null, then this method + * returns false.

+ * + *

Two Pairs are considered equal if and only if + * both the names and values are equal.

+ * + * @param o the Object to test for + * equality with this Pair + * @return true if the given Object is + * equal to this Pair else false + */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o instanceof KVPair) { + KVPair pair = (KVPair) o; + if (!Objects.equals(key, pair.key)) { + return false; + } + return Objects.equals(value, pair.value); + } + return false; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java new file mode 100644 index 0000000000..4e9358d683 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java @@ -0,0 +1,51 @@ +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; + +public class PDException extends Exception{ + private int errorCode = 0; + + public PDException(int error) { + super(String.format("Error code = %d", error)); + this.errorCode = error; + } + + public PDException(int error, String msg) { + super(msg); + this.errorCode = error; + } + + public PDException(ErrorType errorType) { + super(errorType.name()); + this.errorCode = errorType.getNumber(); + } + + public PDException(ErrorType errorType, String message) { + super(message); + this.errorCode = errorType.getNumber(); + } + + public PDException(ErrorType errorType, Throwable e) { + super(errorType.name(), e); + this.errorCode = errorType.getNumber(); + } + + public PDException(ErrorType errorType, String message, Throwable e) { + super(message, e); + this.errorCode = errorType.getNumber(); + } + + public PDException(int error, Throwable e) { + super(e); + this.errorCode = error; + } + + public PDException(int error, String msg, Throwable e) { + super(msg, e); + this.errorCode = error; + } + + public int getErrorCode() { + return errorCode; + } +} diff --git 
a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java new file mode 100644 index 0000000000..1be1dea183 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java @@ -0,0 +1,36 @@ +package org.apache.hugegraph.pd.common; + +/** + * @author zhangyingjie + * @date 2022/8/1 + **/ +public class PDRuntimeException extends RuntimeException { + + // public static final int LICENSE_ERROR = -11; + + private int errorCode = 0; + + public PDRuntimeException(int error) { + super(String.format("Error code = %d", error)); + this.errorCode = error; + } + + public PDRuntimeException(int error, String msg) { + super(msg); + this.errorCode = error; + } + + public PDRuntimeException(int error, Throwable e) { + super(e); + this.errorCode = error; + } + + public PDRuntimeException(int error, String msg, Throwable e) { + super(msg, e); + this.errorCode = error; + } + + public int getErrorCode() { + return errorCode; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java new file mode 100644 index 0000000000..d27054c032 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -0,0 +1,547 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.common; +======== +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.grpc.Metapb; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +import org.apache.hugegraph.pd.grpc.Metapb; + +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; + +/** + * abandon copy on write way + * 1. When the number of graph * partitions is extremely large, the efficiency is severely + * reduced and cannot be used + */ +public class PartitionCache { + + private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private final Map locks = new HashMap<>(); +======== +/** + * 放弃copy on write的方式 + * 1. 
在 graph * partition 数量极多的时候,效率严重下降,不能用 + */ +public class PartitionCache { + + // 每张图一个缓存 + private volatile Map> keyToPartIdCache; + // graphName + PartitionID组成key + private volatile Map> partitionCache; + + private volatile Map shardGroupCache; + + private volatile Map storeCache; + + private volatile Map graphCache; + // 读写锁对象 + private ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + Lock writeLock = readWriteLock.writeLock(); + // One cache per graph + private volatile Map> keyToPartIdCache; + // graphName + PartitionID + private volatile Map> partitionCache; + private volatile Map shardGroupCache; + private volatile Map storeCache; + private volatile Map graphCache; + + private volatile Map locks = new ConcurrentHashMap<>(); + + public PartitionCache() { + keyToPartIdCache = new ConcurrentHashMap<>(); + partitionCache = new ConcurrentHashMap<>(); + shardGroupCache = new ConcurrentHashMap<>(); + storeCache = new ConcurrentHashMap<>(); + graphCache = new ConcurrentHashMap<>(); + } + + private AtomicBoolean getOrCreateGraphLock(String graphName) { + var lock = this.locks.get(graphName); + if (lock == null) { + try { + writeLock.lock(); + if ((lock = this.locks.get(graphName)) == null) { + lock = new AtomicBoolean(); + locks.put(graphName, lock); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + } finally { +======== + }finally { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + writeLock.unlock(); + } + } + return lock; + } + + public void waitGraphLock(String graphName) { + var lock = getOrCreateGraphLock(graphName); + while (lock.get()) { + Thread.onSpinWait(); + } + } + + public void lockGraph(String graphName) { + var lock = getOrCreateGraphLock(graphName); +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + while (!lock.compareAndSet(false, true)) { +======== + while (! lock.compareAndSet(false, true)) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + Thread.onSpinWait(); + } + } + + public void unlockGraph(String graphName) { + var lock = getOrCreateGraphLock(graphName); + lock.set(false); + } + + /** + * Returns partition information based on partitionId + * + * @param graphName + * @param partId + * @return + */ + public KVPair getPartitionById(String graphName, int partId) { + waitGraphLock(graphName); + var graphs = partitionCache.get(graphName); + if (graphs != null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + var partition = graphs.get(partId); +======== + var partition = graphs.get(partId ); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + if (partition != null) { + return new KVPair<>(partition, getLeaderShard(partId)); + } + } + + return null; + } + + /** + * Returns the partition information where the key is located + * + * @param key + * @return + */ + public KVPair getPartitionByKey(String graphName, byte[] key) { + int code = PartitionUtils.calcHashcode(key); + return getPartitionByCode(graphName, code); + } + + /** + * Returns partition information based on the hashcode of the key + * + * @param graphName + * @param code + * @return + */ + public KVPair getPartitionByCode(String graphName, long code) { + waitGraphLock(graphName); + RangeMap rangeMap = keyToPartIdCache.get(graphName); + if (rangeMap != null) { + Integer partId = rangeMap.get(code); + if (partId != null) { + return getPartitionById(graphName, partId); + } + } + return null; + } + + public List getPartitions(String graphName) { + waitGraphLock(graphName); + + List partitions = new 
ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + if (!partitionCache.containsKey(graphName)) { + return partitions; + } + partitionCache.get(graphName).forEach((k, v) -> { + partitions.add(v); +======== + if (! partitionCache.containsKey(graphName)) { + return partitions; + } + partitionCache.get(graphName).forEach((k,v) -> { + partitions.add(v); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + }); + + return partitions; + } + + public boolean addPartition(String graphName, int partId, Metapb.Partition partition) { + waitGraphLock(graphName); + Metapb.Partition old = null; + + if (partitionCache.containsKey(graphName)) { + old = partitionCache.get(graphName).get(partId); + } + + if (old != null && old.equals(partition)) { + return false; + } + try { + + lockGraph(graphName); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + + partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); + + if (old != null) { + // old [1-3] is covered by [2-3]. When [1-3) becomes [1-2], the original [1-3] + // should not be deleted + // When you confirm that the old start and end are your own, you can delete the + // old ones. (i.e. not covered yet) +======== + if (! partitionCache.containsKey(graphName)) { + partitionCache.put(graphName, new ConcurrentHashMap<>()); + } + + partitionCache.get(graphName).put(partId, partition); + + if (old != null) { + // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) + // 当确认老的 start, end 都是自己的时候,才可以删除老的. 
(即还没覆盖) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + var graphRange = keyToPartIdCache.get(graphName); + if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) + .put(Range.closedOpen(partition.getStartKey(), + partition.getEndKey()), partId); +======== + if (! keyToPartIdCache.containsKey(graphName)) { + keyToPartIdCache.put(graphName, TreeRangeMap.create()); + } + keyToPartIdCache.get(graphName).put(Range.closedOpen(partition.getStartKey(), + partition.getEndKey()), partId); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + } finally { + unlockGraph(graphName); + } + return true; + } + + public void updatePartition(String graphName, int partId, Metapb.Partition partition) { + try { + lockGraph(graphName); + Metapb.Partition old = null; + var graphs = partitionCache.get(graphName); + if (graphs != null) { + old = graphs.get(partId); + } + + if (old != null) { + var graphRange = keyToPartIdCache.get(graphName); + if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); + keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) + 
.put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), + partId); +======== + partitionCache.computeIfAbsent(graphName, k -> new ConcurrentHashMap<>()).put(partId, partition); + keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + } finally { + unlockGraph(graphName); + } + } + + public boolean updatePartition(Metapb.Partition partition) { + + var graphName = partition.getGraphName(); + var partitionId = partition.getId(); + + var old = getPartitionById(graphName, partitionId); + if (old != null && Objects.equals(partition, old.getKey())) { + return false; + } + + updatePartition(graphName, partitionId, partition); + return true; + } + + public void removePartition(String graphName, int partId) { + try { + lockGraph(graphName); + var partition = partitionCache.get(graphName).remove(partId); + if (partition != null) { + var graphRange = keyToPartIdCache.get(graphName); + + if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && + Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { + graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey()); + } + } + } finally { + unlockGraph(graphName); + } + } + + /** + * remove partition id of graph name + * + * @param graphName + * @param id + */ + public void remove(String graphName, int id) { + removePartition(graphName, id); + } + + /** + * remove all partitions + */ + public void removePartitions() { + writeLock.lock(); + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + partitionCache = new HashMap<>(); + keyToPartIdCache = new HashMap<>(); +======== + partitionCache = new ConcurrentHashMap<>(); + keyToPartIdCache = new ConcurrentHashMap<>(); +>>>>>>>> 
d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java + locks.clear(); + } finally { + writeLock.unlock(); + } + } + + /** + * remove partition cache of graphName + * + * @param graphName + */ + public void removeAll(String graphName) { + try { + lockGraph(graphName); + partitionCache.remove(graphName); + keyToPartIdCache.remove(graphName); + locks.remove(graphName); + } finally { + unlockGraph(graphName); + } + } + + private String makePartitionKey(String graphName, int partId) { + return graphName + "/" + partId; + } + + public boolean updateShardGroup(Metapb.ShardGroup shardGroup) { + Metapb.ShardGroup oldShardGroup = shardGroupCache.get(shardGroup.getId()); + if (oldShardGroup != null && oldShardGroup.equals(shardGroup)) { + return false; + } + shardGroupCache.put(shardGroup.getId(), shardGroup); + return true; + } + + public void deleteShardGroup(int shardGroupId) { + shardGroupCache.remove(shardGroupId); + } + + public Metapb.ShardGroup getShardGroup(int groupId) { + return shardGroupCache.get(groupId); + } + + public Map getShardGroups() { + return this.shardGroupCache; + } + + public boolean addStore(Long storeId, Metapb.Store store) { + Metapb.Store oldStore = storeCache.get(storeId); + if (oldStore != null && oldStore.equals(store)) { + return false; + } + storeCache.put(storeId, store); + return true; + } + + public Metapb.Store getStoreById(Long storeId) { + return storeCache.get(storeId); + } + + public void removeStore(Long storeId) { + storeCache.remove(storeId); + } + + public boolean hasGraph(String graphName) { + return !getPartitions(graphName).isEmpty() || getGraph(graphName) != null; + } + + public void updateGraph(Metapb.Graph graph) { + if (Objects.equals(graph, getGraph(graph.getGraphName()))) { + return; + } + graphCache.put(graph.getGraphName(), graph); + } + + public Metapb.Graph getGraph(String graphName) { + return graphCache.get(graphName); + } + + public List getGraphs() { + List 
graphs = new ArrayList<>(); + graphCache.forEach((k, v) -> { + graphs.add(v); + }); + return graphs; + } + + public void reset() { + writeLock.lock(); + try { + partitionCache = new ConcurrentHashMap<>(); + keyToPartIdCache = new ConcurrentHashMap<>(); + shardGroupCache = new ConcurrentHashMap<>(); + storeCache = new ConcurrentHashMap<>(); + graphCache = new ConcurrentHashMap<>(); + locks.clear(); + } finally { + writeLock.unlock(); + } + } + + public void clear() { + reset(); + } + + public String debugCacheByGraphName(String graphName) { + StringBuilder builder = new StringBuilder(); + builder.append("Graph:").append(graphName).append(", cache info: range info: {"); + var rangeMap = keyToPartIdCache.get(graphName); + builder.append(rangeMap == null ? "" : rangeMap).append("}"); + + if (rangeMap != null) { + builder.append(", partition info : {"); + rangeMap.asMapOfRanges().forEach((k, v) -> { + var partition = partitionCache.get(graphName).get(v); + builder.append("[part_id:").append(v); + if (partition != null) { + builder.append(", start_key:").append(partition.getStartKey()) + .append(", end_key:").append(partition.getEndKey()) + .append(", state:").append(partition.getState().name()); + } + builder.append("], "); + }); + builder.append("}"); + } + + builder.append(", graph info:{"); + var graph = graphCache.get(graphName); + if (graph != null) { + builder.append("partition_count:").append(graph.getPartitionCount()) + .append(", state:").append(graph.getState().name()); + } + builder.append("}]"); + return builder.toString(); + } + + public Metapb.Shard getLeaderShard(int partitionId) { + var shardGroup = shardGroupCache.get(partitionId); + if (shardGroup != null) { + for (Metapb.Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + return shard; + } + } + } + + return null; + } + + public void updateShardGroupLeader(int partitionId, Metapb.Shard leader) { + if (shardGroupCache.containsKey(partitionId) && leader != 
null) { + if (!Objects.equals(getLeaderShard(partitionId), leader)) { + var shardGroup = shardGroupCache.get(partitionId); + var builder = Metapb.ShardGroup.newBuilder(shardGroup).clearShards(); + for (var shard : shardGroup.getShardsList()) { + builder.addShards( + Metapb.Shard.newBuilder() + .setStoreId(shard.getStoreId()) + .setRole(shard.getStoreId() == leader.getStoreId() ? + Metapb.ShardRole.Leader : + Metapb.ShardRole.Follower) + .build() + ); + } + shardGroupCache.put(partitionId, builder.build()); + } + } + } + + public String debugShardGroup() { + StringBuilder builder = new StringBuilder(); + builder.append("shard group cache:{"); + shardGroupCache.forEach((partitionId, shardGroup) -> { + builder.append(partitionId).append("::{") + .append("version:").append(shardGroup.getVersion()) + .append(", conf_version:").append(shardGroup.getConfVer()) + .append(", state:").append(shardGroup.getState().name()) + .append(", shards:["); + + for (var shard : shardGroup.getShardsList()) { + builder.append("{store_id:").append(shard.getStoreId()) + .append(", role:").append(shard.getRole().name()) + .append("},"); + } + builder.append("], "); + }); + builder.append("}"); + return builder.toString(); + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java new file mode 100644 index 0000000000..b94225387c --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java @@ -0,0 +1,28 @@ +package org.apache.hugegraph.pd.common; + +public class PartitionUtils { + + public static final int MAX_VALUE = 0xffff; + + /** + * 计算key的hashcode + * + * @param key + * @return hashcode + */ + public static int calcHashcode(byte[] key) { + final int p = 16777619; + int hash = (int) 2166136261L; + for (byte element : key) + hash = (hash ^ element) * p; + hash += hash << 13; + hash ^= hash >> 7; + hash += hash << 3; + hash ^= hash >> 
17; + hash += hash << 5; + hash = hash & PartitionUtils.MAX_VALUE; + if ( hash == PartitionUtils.MAX_VALUE ) + hash = PartitionUtils.MAX_VALUE - 1; + return hash; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/DefaultThreadFactory.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/DefaultThreadFactory.java new file mode 100644 index 0000000000..130e3998c6 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/DefaultThreadFactory.java @@ -0,0 +1,32 @@ +package org.apache.hugegraph.pd.util; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * @author zhangyingjie + * @date 2023/6/13 + **/ +public class DefaultThreadFactory implements ThreadFactory { + + private final AtomicInteger number = new AtomicInteger(1); + private final String namePrefix; + private boolean daemon; + + public DefaultThreadFactory(String prefix, boolean daemon) { + this.namePrefix = prefix + "-"; + this.daemon = daemon; + } + + public DefaultThreadFactory(String prefix) { + this(prefix, true); + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(null, r, namePrefix + number.getAndIncrement(), 0); + t.setDaemon(daemon); + t.setPriority(Thread.NORM_PRIORITY); + return t; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ExecutorUtil.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ExecutorUtil.java new file mode 100644 index 0000000000..9b949f4950 --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ExecutorUtil.java @@ -0,0 +1,51 @@ +package org.apache.hugegraph.pd.util; + +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +public final class 
ExecutorUtil { + + private static Map pools = new ConcurrentHashMap<>(); + + public static ThreadPoolExecutor getThreadPoolExecutor(String name) { + if (name == null) { + return null; + } + return pools.get(name); + } + + public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads, + int queueSize) { + + return createExecutor(name, coreThreads, maxThreads, queueSize, true); + } + + public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads, + int queueSize, boolean daemon) { + ThreadPoolExecutor res = pools.get(name); + if (res != null) { + return res; + } + synchronized (pools) { + res = pools.get(name); + if (res != null) { + return res; + } + BlockingQueue queue; + if (queueSize <= 0) { + queue = new SynchronousQueue(); + } else { + queue = new LinkedBlockingQueue<>(queueSize); + } + res = new ThreadPoolExecutor(coreThreads, maxThreads, 60L, TimeUnit.SECONDS, queue, + new DefaultThreadFactory(name, daemon)); + pools.put(name, res); + } + return res; + } +} diff --git a/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ResponseUtil.java b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ResponseUtil.java new file mode 100644 index 0000000000..39ddb484de --- /dev/null +++ b/hg-pd-common/src/main/java/org/apache/hugegraph/pd/util/ResponseUtil.java @@ -0,0 +1,55 @@ +package org.apache.hugegraph.pd.util; + +import static org.apache.hugegraph.pd.grpc.common.ErrorType.CLIENT_INVALID_PARAMETER_VALUE; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.OK_VALUE; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.PD_NOT_LEADER_VALUE; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.PD_UNAVAILABLE_VALUE; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.WARNING_VALUE; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import 
com.google.protobuf.util.JsonFormat; + +/** + * @author zhangyingjie + * @date 2024/1/11 + **/ +public class ResponseUtil { + + private static Map details = new HashMap<>() {{ + put(OK_VALUE, "OK"); + put(WARNING_VALUE, "The correct result is obtained, but switching nodes is a better option"); + put(PD_UNAVAILABLE_VALUE, "The Pd cannot provide services. Switch the node and try again"); + put(PD_NOT_LEADER_VALUE, "The Pd is not the leader. Switch the node and try again"); + put(CLIENT_INVALID_PARAMETER_VALUE, "The parameters passed by the client are incorrect"); + }}; + + public static String getMessageByCode(int code) { + ErrorType type = ErrorType.forNumber(code); + if (type != null) { + return type.name(); + } else { + return ""; + } + } + + public static String getDetailByCode(int code) { + String detail = details.get(code); + if (detail != null) { + return detail; + } else { + return getMessageByCode(code); + } + } + + public static String toJson(ResponseHeader header) { + try { + return JsonFormat.printer().print(header); + } catch (Exception e) { + return "{}"; + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java new file mode 100644 index 0000000000..2afee8bd5b --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/ConfigService.java @@ -0,0 +1,181 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.ConfigMetaStore; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import lombok.extern.slf4j.Slf4j; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +@Slf4j +public class ConfigService implements RaftStateListener { + + private PDConfig 
pdConfig; + private ConfigMetaStore meta; + + public ConfigService(PDConfig config) { + this.pdConfig = config; + config.setConfigService(this); + meta = MetadataFactory.newConfigMeta(config); + } + + public Metapb.PDConfig getPDConfig(long version) throws PDException { + return this.meta.getPdConfig(version); + } + + public Metapb.PDConfig getPDConfig() throws PDException { + return this.meta.getPdConfig(0); + } + + public Metapb.PDConfig setPDConfig(Metapb.PDConfig mConfig) throws PDException { + Metapb.PDConfig oldCfg = getPDConfig(); + Metapb.PDConfig.Builder builder = oldCfg.toBuilder().mergeFrom(mConfig) + .setVersion(oldCfg.getVersion() + 1) + .setTimestamp(System.currentTimeMillis()); + mConfig = this.meta.setPdConfig(builder.build()); + log.info("PDConfig has been modified, new PDConfig is {}", mConfig); + updatePDConfig(mConfig); + return mConfig; + } + + public List getGraphSpace(String graphSpaceName) throws PDException { + return this.meta.getGraphSpace(graphSpaceName); + } + + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + return this.meta.setGraphSpace(graphSpace.toBuilder() + .setTimestamp(System.currentTimeMillis()) + .build()); + } + + /** + * 从存储中读取配置项,并覆盖全局的PDConfig对象 + * @return + */ + public PDConfig loadConfig() { + try { + Metapb.PDConfig mConfig = this.meta.getPdConfig(0); + if ( mConfig == null ){ + // todo : 初始化配置, store group + mConfig = Metapb.PDConfig.newBuilder() + .setShardCount(pdConfig.getPartition().getShardCount()) + .setVersion(1) + .setTimestamp(System.currentTimeMillis()) + .setMaxShardsPerStore(pdConfig.getPartition().getMaxShardsPerStore()) + .build(); + this.meta.setPdConfig(mConfig); + } + + pdConfig = updatePDConfig(mConfig); + // 考虑版本升级 + loadStoreGroup(); + } catch (Exception e) { + log.error("ConfigService loadConfig exception:", e); + } + return pdConfig; + } + + private void loadStoreGroup() throws PDException { + var groups = getAllStoreGroup(); + if (groups.isEmpty()) { 
+ String storeList = pdConfig.getInitialStoreList(); + Map> groupMap = new HashMap<>(); + + // group id -> { store address } + for (String store : storeList.split(",")) { + String[] arr = store.split("/"); + int groupId = -1; + String storeAddress = ""; + if (arr.length == 1) { + groupId = 0; + storeAddress = arr[0]; + } else if (arr.length == 2){ + groupId = Integer.parseInt(arr[1]); + storeAddress = arr[0]; + } else { + throw new PDException(-1, "Invalid store list: " + storeList); + } + + if (! groupMap.containsKey(groupId)) { + groupMap.put(groupId, new HashSet<>()); + } + groupMap.get(groupId).add(storeAddress); + } + + var pdConfig = getPDConfig(); + for (var entry : groupMap.entrySet()) { + int count = entry.getValue().size() * pdConfig.getMaxShardsPerStore() / pdConfig.getShardCount(); + var group = Metapb.StoreGroup.newBuilder() + .setGroupId(entry.getKey()) + .setName("") + .setPartitionCount(count) + .build(); + meta.saveStoreGroup(group); + } + } + } + + public synchronized PDConfig updatePDConfig(Metapb.PDConfig mConfig){ + log.info("update pd config: mConfig:{}", mConfig); + pdConfig.getPartition().setShardCount(mConfig.getShardCount()); + pdConfig.getPartition().setMaxShardsPerStore(mConfig.getMaxShardsPerStore()); + return pdConfig; + } + + public synchronized void setPartitionCount(int storeGroupId, int count) throws PDException { + var storeGroup = meta.getStoreGroup(storeGroupId); + if (storeGroup != null) { + log.info("update the partition count of store group {} to {}", storeGroupId, count); + meta.saveStoreGroup(storeGroup.toBuilder().setPartitionCount(count).build()); + } + } + + public synchronized Metapb.StoreGroup createStoreGroup(int storeGroupId, String name, int partitionCount) + throws PDException { + return meta.saveStoreGroup(Metapb.StoreGroup.newBuilder() + .setGroupId(storeGroupId) + .setName(name) + .setPartitionCount(partitionCount) + .build()); + } + + public synchronized Metapb.StoreGroup updateStoreGroup(int storeGroupId, 
String name) throws PDException { + var storeGroup = meta.getStoreGroup(storeGroupId); + if (storeGroup != null) { + return meta.saveStoreGroup(storeGroup.toBuilder().setName(name).build()); + } + return null; + } + + /** + * meta store中的数量 + * 由于可能会受分区分裂/合并的影响,原始的partition count不推荐使用 + * + * @return partition count of cluster + * @throws PDException when io error + */ + public int getPartitionCount(int storeGroupId) throws PDException { + var group = getStoreGroup(storeGroupId); + return group == null ? 0 : group.getPartitionCount(); + } + + public List getAllStoreGroup() throws PDException { + return meta.getStoreGroups(); + } + + public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException { + return meta.getStoreGroup(groupId); + } + + @Override + public void onRaftLeaderChanged() { + loadConfig(); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java new file mode 100644 index 0000000000..f5790dc7dd --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/IdService.java @@ -0,0 +1,61 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.IdMetaStore; +import org.apache.hugegraph.pd.meta.MetadataFactory; + +public class IdService { + + public PDConfig getPdConfig() { + return pdConfig; + } + + public void setPdConfig(PDConfig pdConfig) { + this.pdConfig = pdConfig; + } + + private PDConfig pdConfig; + private IdMetaStore meta; + + public IdService(PDConfig config) { + this.pdConfig = config; + meta = MetadataFactory.newHugeServerMeta(config); + } + + public long getId(String key, int delta) throws PDException { + return meta.getId(key, delta); + } + + public void resetId(String key) throws PDException { + meta.resetId(key); + } + + /** + * 获取自增循环不重复id, 达到上限后从0开始自增.自动跳过正在使用的cid + * @param key + * @param max + * @return + * 
@throws PDException + */ + public long getCId(String key, long max) throws PDException { + return meta.getCId(key, max); + } + public long getCId(String key, String name, long max) throws PDException { + return meta.getCId(key, name, max); + } + + /** + * 删除一个自增循环id + * @param key + * @param cid + * @return + * @throws PDException + */ + public long delCId(String key, long cid) throws PDException { + return meta.delCId(key, cid); + } + public long delCIdDelay(String key, String name, long cid) throws PDException { + return meta.delCIdDelay(key, name, cid); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java new file mode 100644 index 0000000000..0e57cd241d --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -0,0 +1,364 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +package org.apache.hugegraph.pd; + +import java.nio.charset.Charset; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.V; +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.store.KV; +import org.springframework.stereotype.Service; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +======== +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.V; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +import com.google.protobuf.InvalidProtocolBufferException; + +import lombok.extern.slf4j.Slf4j; + +/** + * + **/ +@Slf4j +@Service +public class KvService { + + public static final char KV_DELIMITER = '@'; + private static final String TTL_PREFIX = "T"; + private static final String KV_PREFIX = "K"; + private static final String LOCK_PREFIX = "L"; + private static final String KV_PREFIX_DELIMITER = KV_PREFIX + KV_DELIMITER; + private static final byte[] EMPTY_VALUE = new byte[0]; + private final MetadataRocksDBStore meta; + private PDConfig pdConfig; + + public KvService(PDConfig config) { + this.pdConfig = config; + meta = new MetadataRocksDBStore(config); + } + + public static String getKey(Object... 
keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + builder.append(KV_PREFIX).append(KV_DELIMITER); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static byte[] getKeyBytes(Object... keys) { + String key = getKey(keys); + return key.getBytes(Charset.defaultCharset()); + } + + public static String getKeyWithoutPrefix(Object... keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static String getDelimiter() { + return String.valueOf(KV_DELIMITER); + } + + public PDConfig getPdConfig() { + return pdConfig; + } + + public void setPdConfig(PDConfig pdConfig) { + this.pdConfig = pdConfig; + } + + public void put(String key, String value) throws PDException { + V storeValue = V.newBuilder().setValue(value).setTtl(0).build(); + meta.put(getStoreKey(key), storeValue.toByteArray()); + // log.warn("add key with key-{}:value-{}", key, value); + } + + public void put(String key, String value, long ttl) throws PDException { + long curTime = System.currentTimeMillis(); + curTime += ttl; + V storeValue = V.newBuilder().setValue(value).setSt(ttl).setTtl(curTime).build(); + meta.put(getStoreKey(key), storeValue.toByteArray()); + meta.put(getTTLStoreKey(key, curTime), EMPTY_VALUE); + // log.warn("add key with key-{}:value-{}:ttl-{}", key, value, ttl); + } + + public String get(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + return get(storeKey); + } + + public String get(byte[] keyBytes) throws PDException { + byte[] bytes = meta.getOne(keyBytes); + String v = getValue(keyBytes, bytes); + return v; + } + + private String getValue(byte[] keyBytes, byte[] valueBytes) throws PDException { + if (valueBytes == null || valueBytes.length 
== 0) { + return ""; + } + try { + V v = V.parseFrom(valueBytes); + if (v.getTtl() == 0 || v.getTtl() >= System.currentTimeMillis()) { + return v.getValue(); + } else { + meta.remove(keyBytes); + meta.remove(getTTLStoreKey(new String(keyBytes), v.getTtl())); + } + } catch (Exception e) { + log.error("parse value with error:{}", e.getMessage()); + throw new PDException(-1, e.getMessage()); + } + return null; + } + + public boolean keepAlive(String key) throws PDException { + byte[] bytes = meta.getOne(getStoreKey(key)); + try { + if (bytes == null || bytes.length == 0) { + return false; + } + V v = V.parseFrom(bytes); + if (v != null) { + long ttl = v.getTtl(); + long st = v.getSt(); + meta.remove(getTTLStoreKey(key, ttl)); + put(key, v.getValue(), st); + return true; + } else { + return false; + } + } catch (InvalidProtocolBufferException e) { + throw new PDException(-1, e.getMessage()); + } + } + + public Kv delete(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + String value = this.get(storeKey); + meta.remove(storeKey); + Kv.Builder builder = Kv.newBuilder().setKey(key); + if (value != null) { + builder.setValue(value); + } + Kv kv = builder.build(); + // log.warn("delete kv with key :{}", key); + return kv; + } + + public List deleteWithPrefix(String key) throws PDException { + byte[] storeKey = getStoreKey(key); + //TODO to many rows for scan + List kvList = meta.scanPrefix(storeKey); + LinkedList kvs = new LinkedList<>(); + for (KV kv : kvList) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) { + kvs.add(Kv.newBuilder().setKey(kvKey).setValue(kvValue).build()); + } + } + meta.removeByPrefix(storeKey); + // log.warn("delete kv with key prefix :{}", key); + return kvs; + } + + /** + * scan result ranged from key start and key end + * + * @param keyStart + * @param keyEnd + * @return Records + * @throws PDException + */ + 
public Map scanRange(String keyStart, String keyEnd) throws PDException { + List list = meta.scanRange(getStoreKey(keyStart), getStoreKey(keyEnd)); + Map map = new HashMap<>(); + for (KV kv : list) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) { + map.put(kvKey, kvValue); + } + } + return map; + } + + public Map scanWithPrefix(String key) throws PDException { + List kvList = meta.scanPrefix(getStoreKey(key)); + HashMap map = new HashMap<>(); + for (KV kv : kvList) { + String kvKey = new String(kv.getKey()).replaceFirst(KV_PREFIX_DELIMITER, ""); + String kvValue = getValue(kv.getKey(), kv.getValue()); + if (kvValue != null) { + map.put(kvKey, kvValue); + } + } + return map; + } + + public boolean locked(String key) throws PDException { + String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java + Map allLock = scanWithPrefix(lockKey); + return allLock != null && allLock.size() != 0; +======== + Map allLock = scanWithPrefix(lockKey + KV_DELIMITER); + if (allLock == null || allLock.size() == 0) { + return false; + } else { + return true; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java + } + + private boolean owned(String key, long clientId) throws PDException { + String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java + Map allLock = scanWithPrefix(lockKey); + if (allLock.size() == 0) { + return true; + } +======== + Map allLock = scanWithPrefix(lockKey + KV_DELIMITER); + if (allLock.size() == 0) return true; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java + for (Map.Entry entry : allLock.entrySet()) { + String entryKey = 
entry.getKey(); + String[] split = entryKey.split(String.valueOf(KV_DELIMITER)); + if (Long.valueOf(split[split.length - 1]).equals(clientId)) { + return true; + } + } + return false; + } + + public boolean lock(String key, long ttl, long clientId) throws PDException { + //TODO lock improvement + synchronized (KvService.class) { + if (!owned(key, clientId)) { + return false; + } + put(getLockKey(key, clientId), " ", ttl); + return true; + } + } + + public boolean lockWithoutReentrant(String key, long ttl, + long clientId) throws PDException { + synchronized (KvService.class) { + if (locked(key)) { + return false; + } + put(getLockKey(key, clientId), " ", ttl); + return true; + } + } + + public boolean unlock(String key, long clientId) throws PDException { + synchronized (KvService.class) { + if (!owned(key, clientId)) { + return false; + } + delete(getLockKey(key, clientId)); + return true; + } + } + + public boolean keepAlive(String key, long clientId) throws PDException { + String lockKey = getLockKey(key, clientId); + return keepAlive(lockKey); + } + + public String getLockKey(String key, long clientId) { + return getKeyWithoutPrefix(LOCK_PREFIX, key, clientId); + } + + public byte[] getStoreKey(String key) { + return getKeyBytes(key); + } + + public byte[] getTTLStoreKey(String key, long time) { + return getKeyBytes(TTL_PREFIX, time, key); + } + + public void clearTTLData() { + try { + byte[] ttlStartKey = getTTLStoreKey("", 0); + byte[] ttlEndKey = getTTLStoreKey("", System.currentTimeMillis()); + List kvList = meta.scanRange(ttlStartKey, ttlEndKey); + for (KV kv : kvList) { + String key = new String(kv.getKey()); + int index = key.indexOf(KV_DELIMITER, 2); + String delKey = key.substring(index + 1); + delete(delKey); + meta.remove(kv.getKey()); + } + } catch (Exception e) { + log.error("clear ttl data with error :", e); + } + } + + public MetadataRocksDBStore getMeta() { + return meta; + } +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +======== + + public static byte[] getKeyBytes(Object... keys) { + String key = getKey(keys); + return key.getBytes(Charset.defaultCharset()); + } + + public static String getKeyWithoutPrefix(Object... keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static String getDelimiter() { + return String.valueOf(KV_DELIMITER); + } + + public MetadataRocksDBStore getMeta() { + return meta; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java new file mode 100644 index 0000000000..c752a6c572 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java @@ -0,0 +1,81 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.LogMeta; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import com.google.protobuf.Any; +import com.google.protobuf.GeneratedMessageV3; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java + +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.LogMeta; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.springframework.stereotype.Service; + +import com.google.protobuf.Any; +import com.google.protobuf.GeneratedMessageV3; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Service +public class LogService { + + public static final String GRPC = "GRPC"; + public static final String REST = "REST"; + public static final String TASK = "TASK"; + public static final String NODE_CHANGE = "NODE_CHANGE"; + public static final String PARTITION_CHANGE = "PARTITION_CHANGE"; + private final LogMeta logMeta; + + public LogService(PDConfig pdConfig) { + logMeta = MetadataFactory.newLogMeta(pdConfig); + } + + public List getLog(String action, Long start, Long end) throws PDException { + return logMeta.getLog(action, start, end); + } + + public void insertLog(String action, String message, GeneratedMessageV3 target) { + try { + Metapb.LogRecord logRecord = Metapb.LogRecord.newBuilder() + .setAction(action) + .setMessage(message) + .setTimestamp(System.currentTimeMillis()) + .setObject(Any.pack(target)) + .build(); + logMeta.insertLog(logRecord); + } catch (PDException e) { + log.debug("Insert log with error:{}", 
e); + } + + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java new file mode 100644 index 0000000000..0e64e0bc73 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java @@ -0,0 +1,2116 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.Consts; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.CleanType; +import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.listener.PartitionInstructionListener; +import org.apache.hugegraph.pd.listener.PartitionStatusListener; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.PartitionMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.SetUtils; +import org.apache.commons.lang3.StringUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.CleanType; +import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.PartitionMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import org.apache.hugegraph.pd.raft.RaftStateListener; + +import lombok.extern.slf4j.Slf4j; +======== + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + +/** + * Partition management + */ +@Slf4j +public class PartitionService implements RaftStateListener { + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + private final long Partition_Version_Skip = 0x0F; +======== + // private final long Partition_Version_Skip = 0x0F; + private final PartitionMeta partitionMeta; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + private final StoreNodeService storeService; + private final PartitionMeta partitionMeta; + private final PDConfig pdConfig; + // Partition command 
listening + private final List instructionListeners; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + // Partition status listeners +======== + private final PDConfig pdConfig; + + private final ConfigService configService; + + // 分区命令监听 + private final List instructionListeners; + + // 分区状态监听 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + private final List statusListeners; + + public PartitionService(PDConfig config, StoreNodeService storeService, ConfigService configService) { + this.pdConfig = config; + this.storeService = storeService; + partitionMeta = MetadataFactory.newPartitionMeta(config); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + instructionListeners = + Collections.synchronizedList(new ArrayList()); + statusListeners = Collections.synchronizedList(new ArrayList()); +======== + instructionListeners = Collections.synchronizedList(new ArrayList<>()); + statusListeners = Collections.synchronizedList(new ArrayList<>()); + this.configService = configService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + public void init() throws PDException { + partitionMeta.init(); + storeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { + if (status == Metapb.StoreState.Tombstone) { + // When the store is stopped, notify all partitions of the store and migrate + // the data + storeOffline(store); + } + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); + } + + /** + * return key partition + * + * @param graphName + * @param key + * @return + */ + 
public Metapb.PartitionShard getPartitionShard(String graphName, byte[] key) throws + PDException { + long code = PartitionUtils.calcHashcode(key); + return getPartitionByCode(graphName, code); + } + + /** + * Returns the partition to which it belongs based on the hashcode + * + * @param graphName + * @param code + * @return + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public Metapb.PartitionShard getPartitionByCode(String graphName, long code) throws + PDException { + if (code < 0 || code >= PartitionUtils.MAX_VALUE) { + throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "code error"); +======== + public Metapb.PartitionShard getPartitionByCode(String graphName, long code) throws PDException { + if ( code < 0 || code >= PartitionUtils.MAX_VALUE) { + throw new PDException(ErrorType.NOT_FOUND_VALUE, "code error"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + // Find the partition ID based on the code, and if it doesn't find, create a new partition + Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code); + + if (partition == null) { + synchronized (this) { + if ((partition = partitionMeta.getPartitionByCode(graphName, code)) == null) { + partition = newPartition(graphName, code); + } + } + } + + Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + .setPartition(partition) + .setLeader(storeService.getLeader( + partition, 0)) + .build(); + log.debug( + "{} Partition get code = {}, partition id = {}, start = {}, end = {}, leader = {}", + graphName, (code), partition.getId(), partition.getStartKey(), + partition.getEndKey(), partShard.getLeader()); +======== + .setPartition(partition) + .setLeader(storeService.getLeader(partition, 0)) + .build(); + log.debug("{} Partition get code = {}, partition 
id = {}, start = {}, end = {}, leader = {}", + graphName, (code), partition.getId(), partition.getStartKey(), partition.getEndKey(), + partShard.getLeader()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + return partShard; + } + + /** + * Returns partition information based on ID + * + * @param graphName + * @param partId + * @return + * @throws PDException + */ + public Metapb.PartitionShard getPartitionShardById(String graphName, int partId) throws + PDException { + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); + if (partition != null) { + var shard = storeService.getLeader(partition, 0); + if (shard != null) { + return Metapb.PartitionShard.newBuilder() + .setPartition(partition) + // 此处需要返回正确的leader,暂时默认取第一个 + .setLeader(shard) + .build(); + } + + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() + .setPartition(partition) + .setLeader(storeService.getLeader( + partition, 0)) + .build(); + + return partShard; +======== + return null; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException { + return partitionMeta.getPartitionById(graphName, partId); + } + + public List getPartitionById(int partId) throws PDException { + return partitionMeta.getPartitionById(partId); + } + + /** + * Get all partitions of the graph + */ + public List getPartitions() { + return partitionMeta.getPartitions(); + } + + public List getPartitions(String graphName) { + if (StringUtils.isAllEmpty(graphName)) { + return partitionMeta.getPartitions(); + } + return partitionMeta.getPartitions(graphName); + } + + /** + * Find all the partitions on the store + * + * @param store + * @return + */ + public List 
getPartitionByStore(Metapb.Store store) throws PDException { + + List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + getGraphs().forEach(graph -> { + getPartitions(graph.getGraphName()).forEach(partition -> { + try { + storeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { + if (shard.getStoreId() == store.getId()) { + partitions.add(partition); + } + }); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + }); + return partitions; +======== + for (Metapb.ShardGroup group : storeService.getShardGroupsByStore(store.getId())) { + partitions.addAll(getPartitionById(group.getId())); + } + return partitions; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + /** + * Creates a new partition + * + * @param graphName + * @return + */ + private Metapb.Partition newPartition(String graphName, long code) throws PDException { + Metapb.Graph graph = partitionMeta.getGraph(graphName); + if (graph == null) { + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "graph not exists:" + graphName); + } + + int partitionSize = PartitionUtils.MAX_VALUE / graph.getPartitionCount(); + if (PartitionUtils.MAX_VALUE % graph.getPartitionCount() != 0) { + // There is a remainder, and the partition is inexhaustible + partitionSize++; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + int partitionId = (int) (code / partitionSize); + long startKey = (long) partitionSize * partitionId; + long endKey = (long) partitionSize * (partitionId + 1); + + // Check Local + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partitionId); +======== + int offset = (int) (code / partitionSize); + return newPartition(graph, offset); + } + + private Metapb.Partition newPartition(Metapb.Graph graph, int offset) throws PDException { + int 
partitionId = getPartitionId(graph, offset); + + int partitionSize = PartitionUtils.MAX_VALUE / graph.getPartitionCount(); + if (PartitionUtils.MAX_VALUE % graph.getPartitionCount() != 0) { + // 有余数,分区除不尽 + partitionSize++; + } + + // 检查本地 + Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partitionId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (partition == null) { + log.info("alloc partition for graph: {}, partition id: {}" , graph.getGraphName(), partitionId); + storeService.allocShards(graph, partitionId); + + long startKey = (long) partitionSize * offset; + long endKey = (long) partitionSize * (offset + 1); + + // Assign a store + partition = Metapb.Partition.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + .setId(partitionId) + .setVersion(0) + .setState(Metapb.PartitionState.PState_Normal) + .setStartKey(startKey) + .setEndKey(endKey) + .setGraphName(graphName) + .build(); +======== + .setId(partitionId) + .setVersion(0) + .setState(Metapb.PartitionState.PState_Normal) + .setStartKey(startKey) + .setEndKey(endKey) + .setGraphName(graph.getGraphName()) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + log.info("Create newPartition {}", partition); + } + + partitionMeta.updatePartition(partition); + + return partition; + } + + public void allocGraphPartitions(Metapb.Graph graph) throws PDException { + for (int i = 0; i < graph.getPartitionCount(); i++) { + newPartition(graph, i); + } + } + + /** + * 计算graph的分区id。 partition gap * store group id + offset + * + * @param graph graph + * @param offset 偏移量,从0开始 + * @return new partition id + * @throws PDException + */ + private int getPartitionId(Metapb.Graph graph, int offset) { + return graph.getStoreGroupId() * Consts.PARTITION_GAP + offset; + } + + /** + * compute 
graph partition id, partition gap * store group id + offset + * + * @param graph graph + * @param offset offset + * @return new partition id + * @throws PDException + */ + protected int getPartitionId(String graphName, byte[] key) throws PDException { + int code = PartitionUtils.calcHashcode(key); + Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code); + return partition != null ? partition.getId() : -1; + } + + /** + * Gets all partitions spanned by the key range + * For the time being, hashcode is used for calculation, and the normal practice is to query + * based on the key + * + * @param graphName + * @param startKey + * @param endKey + */ + public List scanPartitions(String graphName, byte[] startKey, + byte[] endKey) + throws PDException { + int startPartId = getPartitionId(graphName, startKey); + int endPartId = getPartitionId(graphName, endKey); + + List partShards = new ArrayList<>(); + for (int id = startPartId; id <= endPartId; id++) { + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, id); + partShards.add( + Metapb.PartitionShard.newBuilder() + .setPartition(partition) + // Here you need to return the correct leader, and + // temporarily default to the first one + .setLeader(storeService.getLeader(partition, 0)) + .build() + ); + } + return partShards; + } + + public synchronized long updatePartition(List partitions) throws PDException { + for (Metapb.Partition pt : partitions) { + Metapb.Partition oldPt = getPartitionById(pt.getGraphName(), pt.getId()); + partitionMeta.updatePartition(pt); + onPartitionChanged(oldPt, pt); + } + return partitions.size(); + } + + /** + * Update the status of partitions and graphs + * + * @param graph + * @param partId + * @param state + * @throws PDException + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public synchronized void updatePartitionState(String graph, int partId, + Metapb.PartitionState state) throws 
PDException { +======== + public synchronized void updatePartitionState(String graph, int partId, Metapb.PartitionState state) + throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + Metapb.Partition partition = getPartitionById(graph, partId); + + if (partition.getState() != state) { + Metapb.Partition newPartition = partitionMeta.updatePartition(partition.toBuilder() + .setState(state) + .build()); + + onPartitionChanged(partition, newPartition); + } + } + + public synchronized void updateGraphState(String graphName, Metapb.PartitionState state) throws + PDException { + Metapb.Graph graph = getGraph(graphName); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (graph != null) { + partitionMeta.updateGraph(graph.toBuilder() + .setState(state).build()); +======== + if (graph != null && graph.getState() != state) { + partitionMeta.updateGraph(graph.toBuilder().setState(state).build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + } + + public synchronized long removePartition(String graphName, int partId) throws PDException { + log.info("Partition {}-{} removePartition", graphName, partId); + Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); + var ret = partitionMeta.removePartition(graphName, partId); + partitionMeta.loadGraph(graphName); + onPartitionRemoved(partition); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + try { + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { + if (pt.getState().getNumber() > state.getNumber()) { + state = pt.getState(); + } + } + updateGraphState(partition.getGraphName(), state); + + state = Metapb.PartitionState.PState_Normal; + for 
(Metapb.ShardGroup group : storeService.getShardGroups()) { + if (group.getState().getNumber() > state.getNumber()) { + state = group.getState(); + } + } + storeService.updateClusterStatus(state); + + } catch (PDException e) { + log.error("onPartitionChanged", e); + } +======== + // source中有些是 offline的,删除后,需要更新图的状态 +// try { + // partition状态与 partition无关 +// Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; +// for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { +// if (pt.getState().getNumber() > state.getNumber()) { +// state = pt.getState(); +// } +// } +// updateGraphState(partition.getGraphName(), state); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + // 理论上不需要处理, shard group更新状态的时候,已经更新了cluster的状态 +// Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; +// for(Metapb.ShardGroup group : storeService.getShardGroups()){ +// if ( group.getState().getNumber() > state.getNumber()) { +// state = group.getState(); +// } +// } +// +// storeService.updateClusterStatus(getStoreGroupByGraph(graphName), state); +// }catch ( PDException e){ +// log.error("onPartitionChanged", e); +// } +// + return ret; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws + PDException { + return partitionMeta.getPartitionStats(graphName, partitionId); +======== + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { + return partitionMeta.getPartitionStats("", partitionId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + /** + * Get the partition status of the graph + */ + public List getPartitionStatus(String graphName) + throws PDException { + return partitionMeta.getPartitionStats(graphName); + } + 
+ /** + * Returns the information of the graph + */ + public List getGraphs() throws PDException { + return partitionMeta.getGraphs(); + } + + public List getGraphs(int shardGroup) throws PDException { + return partitionMeta.getGraphs().stream() + .filter(graph -> graph.getStoreGroupId() == shardGroup / Consts.PARTITION_GAP) + .collect(Collectors.toList()); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + return partitionMeta.getGraph(graphName); + } + + /** + * Delete the diagram and all partitions of the diagram + */ + public Metapb.Graph delGraph(String graphName) throws PDException { + log.info("delGraph {}", graphName); + Metapb.Graph graph = getGraph(graphName); + getPartitions(graphName).forEach(this::onPartitionRemoved); + partitionMeta.removeAllPartitions(graphName); + partitionMeta.removeGraph(graphName); + if (!StringUtils.isEmpty(graphName)) { + partitionMeta.removePartitionStats(graphName); + } + return graph; + } + + public synchronized Metapb.Graph createGraph(String graphName, int partitionCount, int storeGroupId) + throws PDException { + var lastGraph = partitionMeta.getGraph(graphName); + if (lastGraph != null) { + throw new PDException(ErrorType.GRAPH_ALREADY_EXISTS, "graph already exists:" + graphName); + } + + if (partitionCount == 0) { + partitionCount = configService.getPartitionCount(storeGroupId); + } + + var graph = partitionMeta.createGraph(graphName, partitionCount, storeGroupId); + try { + // alloc partition + allocGraphPartitions(graph); + } catch (PDException e) { + // when errors occur, remove graph + partitionMeta.removeGraph(graphName); + throw e; + } + return graph; + } + + /** + * To modify the graph information, you need to notify the store + */ + public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + Metapb.Graph lastGraph = partitionMeta.getGraph(graph.getGraphName()); + if (lastGraph == null) { + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "graph not exists:" 
+ graph.getGraphName()); + } + + log.info("updateGraph graph: {}, last: {}", graph, lastGraph); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + int partCount = + (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? + 1 : pdConfig.getPartition().getTotalCount(); +======== + int partCount = (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? + 1 : configService.getPartitionCount(graph.getStoreGroupId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + // set the partition count to specified if legal. + if (graph.getPartitionCount() <= partCount && graph.getPartitionCount() > 0) { + partCount = graph.getPartitionCount(); + } + + if (partCount == 0) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + throw new PDException(10010, "update graph error, partition count = 0"); +======== + throw new PDException(ErrorType.Invalid_Partition_count, "partition count = 0"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + graph = lastGraph.toBuilder() + .mergeFrom(graph) + .setPartitionCount(partCount) + .build(); + partitionMeta.updateGraph(graph); + + // The number of partitions has changed + if (lastGraph.getPartitionCount() != graph.getPartitionCount()) { + log.info("updateGraph graph: {}, partition count changed from {} to {}", +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + graph.getGraphName(), lastGraph.getPartitionCount(), + graph.getPartitionCount()); +======== + graph.getGraphName(), lastGraph.getPartitionCount(), graph.getPartitionCount()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + return graph; + } + + /** + * 修改图名称信息 + */ + public synchronized 
Metapb.Graph updateGraphName(Metapb.Graph graph) throws PDException { + Metapb.Graph lastGraph = partitionMeta.getGraph(graph.getGraphName()); + if (lastGraph == null) { + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "update graph: " + graph.getGraphName() + ", not exists"); + } + graph = lastGraph.toBuilder() + .setGraphName(graph.getGraphName()) + .build(); + partitionMeta.updateGraph(graph); + return graph; + } + + // partitionId -> (storeId -> shard committedIndex) + public Map> getCommittedIndexStats() throws PDException { + Map> map = new HashMap<>(); + for (Metapb.Store store : storeService.getActiveStores()) { + for (Metapb.RaftStats raftStats : store.getStats().getRaftStatsList()) { + int partitionID = raftStats.getPartitionId(); + if (!map.containsKey(partitionID)) { + map.put(partitionID, new HashMap<>()); + } + Map storeMap = map.get(partitionID); + if (!storeMap.containsKey(store.getId())) { + storeMap.put(store.getId(), raftStats.getCommittedIndex()); + } + } + } + return map; + } + + /** + * The storage is taken offline and the partition data is migrated + * + * @param store + */ + public void storeOffline(Metapb.Store store) { + try { + log.info("storeOffline store id: {}, address: {}, state: {}", + store.getId(), store.getAddress(), store.getState()); + List partitions = getPartitionByStore(store); + var partIds = new HashSet(); + for (Metapb.Partition p : partitions) { + if (partIds.contains(p.getId())) { + continue; + } + shardOffline(p, store.getId()); + partIds.add(p.getId()); + } + } catch (PDException e) { + log.error("storeOffline exception: ", e); + } + } + + /** + * The storage is taken offline and the partition data is migrated + */ + public synchronized void shardOffline(Metapb.Partition partition, long storeId) { + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + log.info("shardOffline Partition {} - {} shardOffline store : {}", + partition.getGraphName(), partition.getId(), 
storeId); +======== + log.info("shardOffline Partition {}-{} shardOffline store : {}", + partition.getGraphName(), partition.getId(), storeId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + // partition = getPartitionById(partition.getGraphName(), partition.getId()); + // Metapb.Partition.Builder builder = Metapb.Partition.newBuilder(partition); + // builder.clearShards(); + // partition.getShardsList().forEach(shard -> { + // if (shard.getStoreId() != storeId) + // builder.addShards(shard); + // }); + // partition = builder.build(); + Metapb.Graph graph = getGraph(partition.getGraphName()); + reallocPartitionShards(graph, partition); + } catch (PDException e) { + log.error("storeOffline exception: ", e); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + private boolean isShardListEquals(List list1, List list2) { + if (list1 == list2) { + return true; + } else if (list1 != null && list2 != null) { + + var s1 = list1.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare) + .collect(Collectors.toList()); + var s2 = list2.stream().map(Metapb.Shard::getStoreId).sorted(Long::compare) + .collect(Collectors.toList()); + + if (s1.size() == s2.size()) { + for (int i = 0; i < s1.size(); i++) { + if (s1.get(i) != s2.get(i)) { + return false; + } + } + return true; + } +======== + /** + * 判定两个shard列表成员(store id)是否相同 + * @param list1 shard list1 + * @param list2 shard list2 + * @return true if members are same, false otherwise + */ + private boolean isShardListMemberEquals(List list1, List list2){ + if (list1 == list2) { + return true; + }else if (list1 != null && list2 != null && list1.size() == list2.size()) { + var s1 = list1.stream().map(Metapb.Shard::getStoreId).collect(Collectors.toSet()); + var s2 = list2.stream().map(Metapb.Shard::getStoreId).collect(Collectors.toSet()); + return SetUtils.isEqualSet(s1, s2); +>>>>>>>> d7e3d51dd (3.6.5 -> 
4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + return false; + } + + /** + * Reassign shards + * + * @param graph + * @param partition + * @throws PDException + */ + public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partition) throws + PDException { + if (partition == null) { + return; + } + List originalShards = storeService.getShardList(partition.getId()); + + var shardGroup = storeService.getShardGroup(partition.getId()); + + List shards = storeService.reallocShards(shardGroup); + + if (isShardListMemberEquals(originalShards, shards)) { + log.info("reallocPartitionShards:{} vs {}", shardGroup, shards); + // partition = Metapb.Partition.newBuilder(partition) + // .clearShards().addAllShards(shards) + // .build(); + // partitionMeta.updatePartition(partition); + fireChangeShard(partition, shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public synchronized void reallocPartitionShards(String graphName, int partitionId) throws + PDException { + reallocPartitionShards(partitionMeta.getGraph(graphName), + partitionMeta.getPartitionById(graphName, partitionId)); + } +======== +// public synchronized void reallocPartitionShards(String graphName, int partitionId) throws PDException { +// reallocPartitionShards(partitionMeta.getGraph(graphName), +// partitionMeta.getPartitionById(graphName, partitionId)); +// } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + /** + * Migrate partition copies + */ + public synchronized void movePartitionsShard(Integer partitionId, long fromStore, + long toStore) { + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, + fromStore, toStore); +======== + if 
(storeService.getStoreGroupByStore(fromStore) != storeService.getStoreGroupByStore(toStore)) { + log.error("move partition shard: source store {} and dest store {} has different store group", + fromStore, toStore); + return; + } + + log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, fromStore, toStore); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + for (Metapb.Graph graph : getGraphs()) { + Metapb.Partition partition = + this.getPartitionById(graph.getGraphName(), partitionId); + if (partition == null) { + continue; + } + + var shardGroup = storeService.getShardGroup(partitionId); + List shards = new ArrayList<>(); + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() != fromStore) { + shards.add(shard); + } + }); + + shards.add(Metapb.Shard.newBuilder().setStoreId(toStore) + .setRole(Metapb.ShardRole.Follower).build()); + + // storeService.updateShardGroup(partitionId, shards, -1, -1); + // storeService.onShardGroupStatusChanged(shardGroup, newShardGroup); + fireChangeShard(partition, shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + // Shard groups have nothing to do with Graph, just one is enough + break; + } + } catch (PDException e) { + log.error("Partition {} movePartitionsShard exception {}", partitionId, e); + } + } + + /** + * Split all partitions in the cluster into splits + * + * @param splits Split partitions + */ + public synchronized void splitPartition(List> splits) throws + PDException { + var tasks = new HashMap>>(); + + for (var pair : splits) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + for (var partition : getPartitionById(pair.getKey())) { +======== + for (var partition : getPartitionById(pair.getKey())){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (!tasks.containsKey(partition.getGraphName())) { 
+ tasks.put(partition.getGraphName(), new ArrayList<>()); + } + tasks.get(partition.getGraphName()).add(pair); + } + } + + for (var entry : tasks.entrySet()) { + splitPartition(getGraph(entry.getKey()), entry.getValue()); + } + } + + /** + * Partition splitting, splitting a graph into N pieces + * + * @param graph graph + * @param toCount target count + * @throws PDException + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException { + + var partitionCount = getPartitions(graph.getGraphName()).size(); + var maxShardsPerStore = pdConfig.getPartition().getMaxShardsPerStore(); + var shardCount = pdConfig.getPartition().getShardCount(); + + if (shardCount * toCount > storeService.getActiveStores().size() * maxShardsPerStore) { + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count, reached the upper " + + "limit of the cluster"); + } + + if (toCount % partitionCount != 0 || toCount <= partitionCount) { + throw new PDException(Pdpb.ErrorType.Invalid_Split_Partition_Count_VALUE, + "invalid split partition count, make sure to count is N time of" + + " current partition count"); +======== + + public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException{ + assert graph != null; + var partitionCount = graph.getPartitionCount(); + var maxShardsPerStore = pdConfig.getPartition().getMaxShardsPerStore(); + var shardCount = pdConfig.getPartition().getShardCount(); + + if ( shardCount * toCount > + storeService.getActiveStoresByStoreGroup(graph.getStoreGroupId()).size() * maxShardsPerStore){ + throw new PDException(ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count, reached the upper limit of the cluster"); + } + + if (toCount % partitionCount != 0 || toCount <= partitionCount) { + throw new 
PDException(ErrorType.Invalid_Split_Partition_Count_VALUE, + "invalid split partition count, make sure to count is N time of current partition count"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + // Since it is an integer multiple,The enrichment factor is toCount / current count + var splitCount = toCount / partitionCount; + var list = new ArrayList>(); + for (int i = 0; i < partitionCount; i++) { + list.add(new KVPair<>(i, splitCount)); + } + + splitPartition(graph, list); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + private synchronized void splitPartition(Metapb.Graph graph, + List> splits) +======== + /** + * split graph + * @param graph graph + * @param splits pairs of (partition id, count) + * @throws PDException + */ + private synchronized void splitPartition(Metapb.Graph graph, List> splits) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + throws PDException { + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (!taskInfoMeta.scanSplitTask(graph.getGraphName()).isEmpty()) { + return; + } + + splits.sort(Comparator.comparing(KVPair::getKey)); + log.info("split partition, graph: {}, splits:{}", graph, splits); + + // Start with the last partition subscript + var i = getPartitions(graph.getGraphName()).size(); + + for (var pair : splits) { + Metapb.Partition partition = + partitionMeta.getPartitionById(graph.getGraphName(), pair.getKey()); + if (partition != null) { + var splitCount = pair.getValue(); + long splitLen = (partition.getEndKey() - partition.getStartKey()) / splitCount; + + List newPartitions = new ArrayList<>(); + // The first partition is the original partition + newPartitions.add(partition.toBuilder() + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getStartKey() + splitLen) + .setId(partition.getId()) + 
.setState(Metapb.PartitionState.PState_Offline) + .build()); + + int idx = 0; + + for (; idx < splitCount - 2; idx++) { + newPartitions.add(partition.toBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(newPartitions.get(idx).getEndKey() + + splitLen) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); +======== + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(newPartitions.get(idx).getEndKey() + splitLen) + .setId(getPartitionId(graph, i)) + .setState(Metapb.PartitionState.PState_Offline) + .build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + i += 1; + } + + newPartitions.add(partition.toBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(partition.getEndKey()) + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build()); +======== + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(partition.getEndKey()) + .setId(getPartitionId(graph, i)) + .setState(Metapb.PartitionState.PState_Offline) + .build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + i += 1; + + // try to save new partitions, and repair shard group + for (int j = 0; j < newPartitions.size(); j++) { + var newPartition = newPartitions.get(j); + + if (j != 0) { + partitionMeta.updatePartition(newPartition); + } + // Create a shard group, if it is empty, create it according to the shard + // group of the partition, and ensure that it is on one machine + // If it exists, the number of partitions in each graph is not the same, and + // the store side needs to be copied to other machines + var shardGroup = storeService.getShardGroup(newPartition.getId()); + if (shardGroup 
== null) { + shardGroup = storeService.getShardGroup(partition.getId()).toBuilder() + .setId(newPartition.getId()) + .build(); + storeService.getStoreInfoMeta().updateShardGroup(shardGroup); + updateShardGroupCache(shardGroup); + } + + // check shard list + if (shardGroup.getShardsCount() != pdConfig.getPartition().getShardCount()) { + storeService.reallocShards(shardGroup); + } + } + + SplitPartition splitPartition = SplitPartition.newBuilder() + .addAllNewPartition(newPartitions) + .build(); + + fireSplitPartition(partition, splitPartition); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + // Change the partition status to Offline, and resume the partition status to + // Offline after the task is completed + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Offline); +======== + // 修改Partition状态为下线,任务完成后恢复为上线 + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Offline); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + // Record transactions + var task = MetaTask.Task.newBuilder().setPartition(partition) + .setSplitPartition(splitPartition) + .build(); + taskInfoMeta.addSplitTask(pair.getKey(), task.getPartition(), + task.getSplitPartition()); + } + } + } + + /** + * transfer leader to other shard + * Just transfer a partition + */ + public void transferLeader(Integer partId, Metapb.Shard shard) { + try { + var partitions = getPartitionById(partId); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (partitions.size() > 0) { + fireTransferLeader(partitions.get(0), + TransferLeader.newBuilder().setShard(shard).build()); +======== + if (!partitions.isEmpty()) { + fireTransferLeader(partitions.get(0), TransferLeader.newBuilder().setShard(shard).build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } +// for (Metapb.Graph graph : getGraphs()) { +// Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partId); +// if (partition != null) { +// fireTransferLeader(partition, TransferLeader.newBuilder().setShard(shard) +// .build()); +// } +// } + } catch (PDException e) { + log.error("Partition {} transferLeader exception {}", partId, e); + } + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + * // todo : Check the corresponding store group and check the logic + * Partition merging: Merges the number of partitions in the entire cluster into toCount +======== + * // todo : 检查对应的store group, 检查逻辑 + * 分区合并,将整个集群的分区数,合并到toCount个 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + * + * @param toCount The number of partitions to be targeted + * @throws PDException when query errors + */ + public void combinePartition(int storeGroupId, int toCount) throws PDException { + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + int shardsTotalCount = getShardGroupCount(); + for (var graph : getGraphs()) { + // All graphs larger than the toCount partition are scaled in + if (graph.getPartitionCount() > toCount) { +======== + int shardsTotalCount = getShardGroupCount(storeGroupId); + for (var graph : getGraphs()){ + if (graph.getStoreGroupId() != storeGroupId){ + continue; + } + // 对所有大于toCount分区的图,都进行缩容 + if (graph.getPartitionCount() > toCount){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + combineGraphPartition(graph, toCount, shardsTotalCount); + } + } + } + + /** + * For a single graph, perform partition merging + * + * @param graphName the name of the graph + * @param toCount the target partition count + * @throws PDException when query 
errors + */ + public void combineGraphPartition(String graphName, int toCount) throws PDException { + var graph = getGraph(graphName); + assert graph != null; + combineGraphPartition(graph, toCount, getShardGroupCount(graph.getStoreGroupId())); + } + + /** + * Internal implementation of single-graph merging + * + * @param graph the name of the graph + * @param toCount the target partition count + * @param shardCount the shard count of the clusters + * @throws PDException when query errors + */ + private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, int shardCount) + throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (graph == null) { + throw new PDException(1, + "Graph not exists, try to use full graph name, like " + + "/DEFAULT/GRAPH_NAME/g"); +======== + if (graph == null){ + throw new PDException(ErrorType.GRAPH_NOT_EXISTS); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + log.info("Combine graph {} partition, from {}, to {}, with shard count:{}", + graph.getGraphName(), graph.getPartitionCount(), toCount, shardCount); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (!checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { + log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, + graph.getGraphName()); + throw new PDException(2, + "illegal partition toCount, should between 1 ~ shard group " + + "count and " + + " can be dived by shard group count"); + } + + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (taskInfoMeta.scanMoveTask(graph.getGraphName()).size() > 0) { + throw new PDException(3, "Graph Combine process exists"); +======== + if (! 
checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { + log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, graph.getGraphName()); + throw new PDException(ErrorType.Invalid_Combine_Partition_Count); + } + + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (!taskInfoMeta.scanMoveTask(graph.getGraphName()).isEmpty()) { + throw new PDException(ErrorType.Combine_Partition_Doing); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + // According to key start sort + var partitions = getPartitions(graph.getGraphName()).stream() + .sorted(Comparator.comparing( + Metapb.Partition::getStartKey)) + .collect(Collectors.toList()); + + // Partition numbers do not have to be sequential + var sortPartitions = getPartitions(graph.getGraphName()) + .stream() + .sorted(Comparator.comparing(Metapb.Partition::getId)) + .collect(Collectors.toList()); + + var groupSize = partitions.size() / toCount; // merge group size + // 0~12 to 4 partitions + // scheme: 0,1,2 => 0, 3,4,5 => 1, 6,7,8 => 2, 9,10,11 => 3 + // Ensure the continuity of partitions + for (int i = 0; i < toCount; i++) { + var startKey = partitions.get(i * groupSize).getStartKey(); + var endKey = partitions.get(i * groupSize + groupSize - 1).getEndKey(); + // compose the key range + // the start key and end key should be changed if combine success. 
+ + var targetPartition = Metapb.Partition.newBuilder(sortPartitions.get(i)) + .setStartKey(startKey) + .setEndKey(endKey) + .build(); + + for (int j = 0; j < groupSize; j++) { + var partition = partitions.get(i * groupSize + j); + // If the partition ID is the same, skip it + if (i == partition.getId()) { + continue; + } + + log.info("combine partition of graph :{}, from part id {} to {}", + partition.getGraphName(), + partition.getId(), targetPartition.getId()); + MovePartition movePartition = MovePartition.newBuilder() + .setTargetPartition(targetPartition) + .setKeyStart(partition.getStartKey()) + .setKeyEnd(partition.getEndKey()) + .build(); + taskInfoMeta.addMovePartitionTask(partition, movePartition); + // source is offline + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Offline); + fireMovePartition(partition, movePartition); + } + // target offline + updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), + Metapb.PartitionState.PState_Offline); + } + + storeService.updateClusterStatus(getStoreGroupByGraph(graph.getGraphName()), + Metapb.ClusterState.Cluster_Offline); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + * get raft group count from storeService + * +======== + * // todo: 调用?? 
+ * 通过 storeService 获取 raft group 总数 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + * @return the count of raft groups + */ + private int getShardGroupCount(int storeGroupId) { + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + return Optional.ofNullable(storeService.getShardGroups()).orElseGet(ArrayList::new) + .size(); + } catch (PDException e) { + log.error("get shard group failed, error: {}", e); +======== + // todo: 检查调用逻辑 + return Optional.ofNullable(storeService.getShardGroups(storeGroupId)).orElseGet(ArrayList::new).size(); + }catch (PDException e){ + log.error("get shard group failed, error: ", e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + return 0; + } + + /** + * Determine whether the graph partition can be retrieved from f to t + * + * @param fromCount The number of partitions now + * @param toCount The number of partitions to be targeted + * @return true when available , or otherwise + */ + private boolean checkTargetCount(int fromCount, int toCount, int shardCount) { + // It should be between 1 ~ N and divisible + return toCount >= 1 && toCount < fromCount && fromCount % toCount == 0 && + toCount < shardCount; + } + + /** + * Process partition heartbeats and record leader information + * Check the term and version to see if it's the latest message + * + * @param stats + */ + public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { + + Metapb.ShardGroup shardGroup = storeService.getShardGroup(stats.getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + // shard group version changes or leader changes + // (The shard group is controlled by the PD, and there may be brief inconsistencies after + // operations such as splitting, subject to PD) + // store Upload the final one raft 
group data + if (shardGroup != null && + (shardGroup.getVersion() < stats.getLeaderTerm() || + shardGroup.getConfVer() < stats.getConfVer())) { + storeService.updateShardGroup(stats.getId(), + stats.getShardList(), stats.getLeaderTerm(), + stats.getConfVer()); +======== + + // shard group version changes or leader changes + // (shard group 由pd控制, 在分裂等操作后,可能出现短暂不一致的情况,以pd为准) + // store 上传最终的 raft group 数据 + if (shardGroup != null) { + if (shardGroup.getVersion() < stats.getLeaderTerm() || shardGroup.getConfVer() < stats.getConfVer() || + ! isShardEquals(shardGroup.getShardsList(), stats.getShardList())) { + storeService.updateShardGroup(stats.getId(), + stats.getShardList(), stats.getLeaderTerm(), stats.getConfVer()); + } + // 更新state信息 + checkShardState(shardGroup, stats); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + // List partitions = getPartitionById(stats.getId()); + // for (Metapb.Partition partition : partitions) { + // partitionMeta.getAndCreateGraph(partition.getGraphName()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + checkShardState(partition, stats); + } + // statistics + partitionMeta.updatePartitionStats(stats.toBuilder() + .setTimestamp(System.currentTimeMillis()).build()); +======== + // } + // 统计信息 + partitionMeta.updatePartitionStats(stats.toBuilder() .setTimestamp(System.currentTimeMillis()).build()); + } + + private boolean isShardEquals(List list1, List list2) { + return SetUtils.isEqualSet(list1, list2); + } + + private Long getLeader(Metapb.ShardGroup group) { + for (var shard : group.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + return shard.getStoreId(); + } + } + return null; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + /** + * Check the shard status, offline shard affects the partition status + * + * @param 
stats + */ + private void checkShardState(Metapb.ShardGroup shardGroup, Metapb.PartitionStats stats) { + + try { + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + + int offCount = 0; + + for (Metapb.ShardStats shard : stats.getShardStatsList()) { + if (shard.getState() == Metapb.ShardState.SState_Offline) { + offCount++; + } + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (partition.getState() != Metapb.PartitionState.PState_Offline) { + if (offCount == 0) { + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); + } else if (offCount * 2 < stats.getShardStatsCount()) { + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Warn); + } else { + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Warn); + } +======== + + if (offCount > 0 && offCount * 2 < stats.getShardStatsCount()) { + state = Metapb.PartitionState.PState_Warn; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } +// if (offCount == 0) { +// state = Metapb.PartitionState.PState_Normal; +// } else if (offCount * 2 < stats.getShardStatsCount()) { +// state = Metapb.PartitionState.PState_Warn; +// } + + if (shardGroup.getState() != state) { + // 更新graph state + for (var graph : getGraphs(shardGroup.getId())) { + if (graph.getState() != state) { + updateGraphState(graph.getGraphName(), state); + } + } + + storeService.updateShardGroupState(shardGroup.getId(), state); + } + +// if (partition.getState() != state) { +// updatePartitionState(partition.getGraphName(), partition.getId(), state); +// } + } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + log.error("Partition {}-{} checkShardState exception {}", + partition.getGraphName(), partition.getId(), 
e); +======== + log.error("checkShardState {} failed, error: ", shardGroup.getId(), e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + } + + public void addInstructionListener(PartitionInstructionListener event) { + instructionListeners.add(event); + } + + public void addStatusListener(PartitionStatusListener listener) { + statusListeners.add(listener); + } + + /** + * Initiates the Change Shard command + * + * @param changeType + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + protected void fireChangeShard(Metapb.Partition partition, List shards, + ConfChangeType changeType) { + log.info("fireChangeShard partition: {}-{}, changeType: {} {}", partition.getGraphName(), + partition.getId(), changeType, shards); +======== + protected void fireChangeShard(Metapb.Partition partition, List shards, ConfChangeType changeType) { + log.info("fireChangeShard partition: {}-{}, changeType:{} {}", + partition.getGraphName(), partition.getId(), changeType, shards); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + instructionListeners.forEach(cmd -> { + try { + cmd.changeShard(partition, ChangeShard.newBuilder() + .addAllShard(shards).setChangeType(changeType) + .build()); + } catch (Exception e) { + log.error("fireChangeShard", e); + } + }); + } + + public void changeShard(int groupId, List shards) throws PDException { + var partitions = getPartitionById(groupId); + if (partitions.isEmpty()) { + return; + } + fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + + /** + * Send a partition split message + * + * @param partition + */ + protected void fireSplitPartition(Metapb.Partition partition, SplitPartition splitPartition) { + log.info("fireSplitPartition partition: {}-{}, split: {}", + partition.getGraphName(), partition.getId(), splitPartition); + 
instructionListeners.forEach(cmd -> { + try { + cmd.splitPartition(partition, splitPartition); + } catch (Exception e) { + log.error("fireSplitPartition", e); + } + }); + } + + /** + * Send a Leader Switchover message + */ + protected void fireTransferLeader(Metapb.Partition partition, TransferLeader transferLeader) { + log.info("fireTransferLeader partition: {}-{}, leader: {}", + partition.getGraphName(), partition.getId(), transferLeader); + instructionListeners.forEach(cmd -> { + try { + cmd.transferLeader(partition, transferLeader); + } catch (Exception e) { + log.error("fireSplitPartition", e); + } + }); + } + + /** + * Send a message to the partition to move data + * + * @param partition Original partition + * @param movePartition Target partition, contains key range + */ + protected void fireMovePartition(Metapb.Partition partition, MovePartition movePartition) { + log.info("fireMovePartition partition: {} -> {}", + partition, movePartition); + + instructionListeners.forEach(cmd -> { + try { + cmd.movePartition(partition, movePartition); + } catch (Exception e) { + log.error("fireMovePartition", e); + } + }); + } + + protected void fireCleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) { + log.info("fireCleanPartition partition: {} -> just keep : {}->{}", + partition.getId(), cleanPartition.getKeyStart(), cleanPartition.getKeyEnd()); + + instructionListeners.forEach(cmd -> { + try { + cmd.cleanPartition(partition, cleanPartition); + } catch (Exception e) { + log.error("cleanPartition", e); + } + }); + } + + protected void fireChangePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) { + log.info("fireChangePartitionKeyRange partition: {}-{} -> key range {}", + partition.getGraphName(), partition.getId(), partitionKeyRange); + + instructionListeners.forEach(cmd -> { + try { + cmd.changePartitionKeyRange(partition, partitionKeyRange); + } catch (Exception e) { + log.error("cleanPartition", e); + } + }); + 
} + + /** + * Handle graph migration tasks + * + * @param task + */ + public synchronized void handleMoveTask(MetaTask.Task task) throws PDException { + var taskInfoMeta = storeService.getTaskInfoMeta(); + var partition = task.getPartition(); + var movePartition = task.getMovePartition(); + + MetaTask.Task pdMetaTask = taskInfoMeta.getMovePartitionTask(partition.getGraphName(), + movePartition.getTargetPartition() + .getId(), + partition.getId()); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + log.info("report move task, graph:{}, pid : {}->{}, state: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), task.getMovePartition().getTargetPartition().getId(), + task.getState()); +======== + log.info("report move task (id: {}), graph:{}, pid : {}->{}, state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), + task.getMovePartition().getTargetPartition().getId(), task.getState()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + // HAS BEEN PROCESSED(There is it in front) + if (pdMetaTask != null) { + var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateMovePartitionTask(newTask); + + List subTasks = taskInfoMeta.scanMoveTask(partition.getGraphName()); + + var finished = subTasks.stream().allMatch(t -> + t.getState() == + MetaTask.TaskState.Task_Success || + t.getState() == + MetaTask.TaskState.Task_Failure); + + if (finished) { + var allSuccess = subTasks.stream().allMatch( + t -> t.getState() == MetaTask.TaskState.Task_Success); + if (allSuccess) { + log.info("graph:{} combine task all success!", partition.getGraphName()); + handleMoveTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); + } else { + log.info("graph:{} combine task failed!", partition.getGraphName()); + handleMoveTaskIfFailed(partition.getGraphName(), taskInfoMeta); + } + } 
+ } + } + + /** + * When all migration subtasks succeed: + * 1. Send cleanup source partition directives + * 2. Set up target online, renewal key range, renewal graph partition count + * 3. delete move task, mission ended + * + * @param subTasks all move sub tasks + * @param graphName graph name + * @param taskInfoMeta task info meta + * @throws PDException returns if write db failed + */ + private void handleMoveTaskAllSuccess(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) throws PDException { + + var targetPartitionIds = new HashSet(); + var targetPartitions = new ArrayList(); + var deleteFlags = + subTasks.stream().map(task -> task.getMovePartition().getTargetPartition().getId()) + .collect(Collectors.toSet()); + + for (MetaTask.Task subTask : subTasks) { + var source = subTask.getPartition(); + var targetPartition = subTask.getMovePartition().getTargetPartition(); + // Whether it has been dealt with or not + if (!targetPartitionIds.contains(targetPartition.getId())) { + // renewal range + var old = getPartitionById(targetPartition.getGraphName(), targetPartition.getId()); + var newPartition = Metapb.Partition.newBuilder(old) + .setStartKey(targetPartition.getStartKey()) + .setEndKey(targetPartition.getEndKey()) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + // Update before the key range to avoid the problem that the store does not have + // a partition and needs to be queried to the pd + updatePartition(List.of(newPartition)); + targetPartitions.add(newPartition); + + // Send key range change messages + PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() + .setPartitionId(old.getId()) + .setKeyStart( + targetPartition.getStartKey()) + .setKeyEnd( + targetPartition.getEndKey()) + .build(); + // Notice store + fireChangePartitionKeyRange( + old.toBuilder().setState(Metapb.PartitionState.PState_Normal).build(), + partitionKeyRange); + + // Set Target to go live. 
source could theoretically be deleted, so it is not + // processed + updatePartitionState(newPartition.getGraphName(), newPartition.getId(), + Metapb.PartitionState.PState_Normal); + + targetPartitionIds.add(targetPartition.getId()); + } + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(source.getStartKey()) + .setKeyEnd(source.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + // The partition of the target only + // needs to clean up the data, and does + // not need to delete the partition + .setDeletePartition(!deleteFlags.contains( + source.getId())) + .build(); + + log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); + + // Clean up the data of the partition to be moved + fireCleanPartition(source, cleanPartition); + } + + // renewal key range, Local updates, client renewal + // updatePartition(targetPartitions); + + // renewal target Partition status, source may be deleted, so do not process + targetPartitions.forEach(p -> { + try { + updatePartitionState(p.getGraphName(), p.getId(), + Metapb.PartitionState.PState_Normal); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + + partitionMeta.loadGraph(graphName); + + // renewal graph partition count + var graph = getGraph(graphName).toBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + .setPartitionCount(targetPartitionIds.size()) + .build(); +======== + .setPartitionCount(targetPartitionIds.size()) + .setState(Metapb.PartitionState.PState_Normal) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + updateGraph(graph); + + // The transaction is complete + taskInfoMeta.removeMoveTaskPrefix(graphName); + } + + 
/** + * If the scale-in task fails, roll back the merge operation + * 1. Clean up the original target partition and delete the migrated data + * 2. Set the source/target partition to go live + * 3. Delete the task, and the task ends + * + * @param graphName graph name + * @param taskInfoMeta task info meta + * @throws PDException return if write to db failed + */ + private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) throws + PDException { + // Send cleanup target partition tasks,rollback target partition + var targetPartitionIds = new HashSet(); + for (var metaTask : taskInfoMeta.scanMoveTask(graphName)) { + + var source = metaTask.getPartition(); + // Set source to upline + updatePartitionState(source.getGraphName(), source.getId(), + Metapb.PartitionState.PState_Normal); + var movedPartition = metaTask.getMovePartition().getTargetPartition(); + + if (targetPartitionIds.contains(movedPartition.getId())) { + continue; + } + + var targetPartition = getPartitionById(graphName, movedPartition.getId()); + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart( + targetPartition.getStartKey()) + .setKeyEnd(targetPartition.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(false) + .build(); + fireCleanPartition(targetPartition, cleanPartition); + targetPartitionIds.add(targetPartition.getId()); + + // Set Target online + updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), + Metapb.PartitionState.PState_Normal); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + // Clean up the task list +======== + + updateGraphState(graphName, Metapb.PartitionState.PState_Normal); + // 清理掉任务列表 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + taskInfoMeta.removeMoveTaskPrefix(graphName); + } + + /** + * dispose clean task + * + * @param task clean task + */ 
+<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public void handleCleanPartitionTask(MetaTask.Task task) { + log.info("clean task {} -{}, key range:{}~{}, report: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), + task.getCleanPartition().getKeyStart(), + task.getCleanPartition().getKeyEnd(), + task.getState() +======== + public void handleCleanPartitionTask(MetaTask.Task task){ + log.info("clean task (id: {}) {} -{}, key range:{}~{}, report: {}", task.getId(), + task.getPartition().getGraphName(), + task.getPartition().getId(), + task.getCleanPartition().getKeyStart(), + task.getCleanPartition().getKeyEnd(), + task.getState() +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + ); + + // If it fails, try again? + } + + public void handleBuildIndexTask(MetaTask.Task task) throws PDException { + log.info("build index task (id: {}), {} -{} , report state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); + storeService.getTaskInfoMeta().updateUserTask(task); + } + + public synchronized void handleSplitTask(MetaTask.Task task) throws PDException { + + var taskInfoMeta = storeService.getTaskInfoMeta(); + var partition = task.getPartition(); + + MetaTask.Task pdMetaTask = + taskInfoMeta.getSplitTask(partition.getGraphName(), partition.getId()); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + log.info("report split task, graph:{}, pid : {}, state: {}", + task.getPartition().getGraphName(), + task.getPartition().getId(), task.getState()); +======== + log.info("report split task (id: {}), graph:{}, pid : {}, state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + 
if (pdMetaTask != null) { + var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); + taskInfoMeta.updateSplitTask(newTask); + + List subTasks = taskInfoMeta.scanSplitTask(partition.getGraphName()); + + var finished = subTasks.stream().allMatch(t -> + t.getState() == + MetaTask.TaskState.Task_Success || + t.getState() == + MetaTask.TaskState.Task_Failure); + + if (finished) { + var allSuccess = subTasks.stream().allMatch( + t -> t.getState() == MetaTask.TaskState.Task_Success); + if (allSuccess) { + log.info("graph:{} split task all success!", partition.getGraphName()); + handleSplitTaskAllSuccess(subTasks, partition.getGraphName(), taskInfoMeta); + } else { + handleSplitTaskIfFailed(subTasks, partition.getGraphName(), taskInfoMeta); + } + } + } + } + + private void handleSplitTaskAllSuccess(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) + throws PDException { + + int addedPartitions = 0; + var partitions = new ArrayList(); + for (MetaTask.Task subTask : subTasks) { + var source = subTask.getPartition(); + var newPartition = subTask.getSplitPartition().getNewPartitionList().get(0); + + // Send key range change messages + PartitionKeyRange partitionKeyRange = PartitionKeyRange.newBuilder() + .setPartitionId(source.getId()) + .setKeyStart( + newPartition.getStartKey()) + .setKeyEnd( + newPartition.getEndKey()) + .build(); + // Notice store + fireChangePartitionKeyRange(source, partitionKeyRange); + // Set Target to go live. 
source could theoretically be deleted, so it is not processed + + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(newPartition.getStartKey()) + .setKeyEnd(newPartition.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_KEEP_RANGE) + // The partition of the target only + // needs to clean up the data, and does + // not need to delete the partition + .setDeletePartition(false) + .build(); + + log.info("pd clean data: {}-{}, key range:{}-{}, type:{}, delete partition:{}", + source.getGraphName(), + source.getId(), + cleanPartition.getKeyStart(), + cleanPartition.getKeyEnd(), + CleanType.CLEAN_TYPE_EXCLUDE_RANGE, + cleanPartition.getDeletePartition()); + + fireCleanPartition(source, cleanPartition); + + // renewal partition state + for (var sp : subTask.getSplitPartition().getNewPartitionList()) { + partitions.add( + sp.toBuilder().setState(Metapb.PartitionState.PState_Normal).build()); + } + + addedPartitions += subTask.getSplitPartition().getNewPartitionCount() - 1; + } + + updatePartition(partitions); + partitionMeta.loadGraph(graphName); + + var graph = getGraph(graphName); + + var storeGroupId = getStoreGroupByGraph(graphName); + int partitionCount = storeService.getShardGroups(storeGroupId).size(); + // set partition count +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + if (pdConfig.getConfigService().getPartitionCount() != + storeService.getShardGroups().size()) { + pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size()); + log.info("set the partition count of config server to {}", + storeService.getShardGroups().size()); +======== + if (pdConfig.getConfigService().getPartitionCount(storeGroupId) != partitionCount) { + pdConfig.getConfigService().setPartitionCount(storeGroupId, partitionCount); + log.info("set the partition count of config server to {}", partitionCount); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + // renewal graph partition count + var newGraph = graph.toBuilder() + .setPartitionCount(graph.getPartitionCount() + addedPartitions) + .build(); + updateGraph(newGraph); + + // The transaction is complete + taskInfoMeta.removeSplitTaskPrefix(graphName); + } + + private void handleSplitTaskIfFailed(List subTasks, String graphName, + TaskInfoMeta taskInfoMeta) + throws PDException { + for (var metaTask : subTasks) { + var splitPartitions = metaTask.getSplitPartition().getNewPartitionList(); + for (int i = 1; i < splitPartitions.size(); i++) { + var split = splitPartitions.get(i); + CleanPartition cleanPartition = CleanPartition.newBuilder() + .setKeyStart(split.getStartKey()) + .setKeyEnd(split.getEndKey()) + .setCleanType( + CleanType.CLEAN_TYPE_EXCLUDE_RANGE) + .setDeletePartition(true) + .build(); + + fireCleanPartition(split, cleanPartition); + } + + // set partition state normal + var partition = metaTask.getPartition(); + updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); + } + // Clean up the task list + taskInfoMeta.removeSplitTaskPrefix(graphName); + } + + + public void handleBackupGraphTask(MetaTask.Task task) throws PDException { + log.info("backup graph task (id: {}), {} -{} , report state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); + storeService.getTaskInfoMeta().updateUserTask(task); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + * todo : What is the impact of partition changes?? + * Received a message that the leader has changed + * Update the status of the graph and trigger a partition change +======== + * todo : partition 变更的影响?? 
+ * 接收到Leader改变的消息 + * 更新图状态,触发分区变更 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + */ + protected void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + log.info("onPartitionChanged partition: {}", partition); + if (old != null && old.getState() != partition.getState()) { + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { + if (pt.getState().getNumber() > state.getNumber()) { + state = pt.getState(); + } + } + try { + updateGraphState(partition.getGraphName(), state); + } catch (PDException e) { + log.error("onPartitionChanged", e); + } + + } + + statusListeners.forEach(e -> { + e.onPartitionChanged(old, partition); + }); + } + + protected void onPartitionRemoved(Metapb.Partition partition) { + log.info("onPartitionRemoved partition: {}", partition); + statusListeners.forEach(e -> { + e.onPartitionRemoved(partition); + }); + } + + /** + * The leader of the PD has changed and the data needs to be reloaded + */ + @Override + public void onRaftLeaderChanged() { + log.info("Partition service reload cache from rocksdb, due to leader change"); + try { + partitionMeta.reload(); + } catch (PDException e) { + log.error("Partition meta reload exception ", e); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public void onPartitionStateChanged(String graph, int partId, + Metapb.PartitionState state) throws PDException { + updatePartitionState(graph, partId, state); + } + + public void onShardStateChanged(String graph, int partId, Metapb.PartitionState state) { + + } + + /** + * Send rocksdb compaction message + * +======== + /** + * 发送rocksdb compaction 消息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + * @param partId + * @param tableName + */ + public void 
fireDbCompaction(int partId, String tableName) throws PDException { + + // try { + for (Metapb.Graph graph : getGraphs()) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + Metapb.Partition partition = + partitionMeta.getPartitionById(graph.getGraphName(), partId); +======== + Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partId); + // some graphs may doesn't have such partition + if (partition == null) { + continue; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + DbCompaction dbCompaction = DbCompaction.newBuilder() + .setTableName(tableName) + .build(); + instructionListeners.forEach(cmd -> { + try { + cmd.dbCompaction(partition, dbCompaction); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } catch (Exception e) { +======== + log.info("compact partition: {}", partId); + }catch (Exception e){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + log.error("firedbCompaction", e); + } + }); + break; + } + // } catch (PDException e) { + // e.printStackTrace(); + // } + + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + public void updateShardGroupCache(Metapb.ShardGroup group) { + partitionMeta.getPartitionCache().updateShardGroup(group); +======== + public void updateShardGroupCache(Metapb.ShardGroup group){ + partitionMeta.updateShardGroupCache(group); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + public Map getShardGroupCache() { + return partitionMeta.getShardGroupCache(); + } + + private Integer getStoreGroupByGraph(String graphName) throws PDException { + Metapb.Graph graph = getGraph(graphName); + return graph.getStoreGroupId(); + } + +} diff --git 
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java new file mode 100644 index 0000000000..54e371e1b1 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/RegistryService.java @@ -0,0 +1,30 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.meta.DiscoveryMetaStore; +import org.apache.hugegraph.pd.meta.MetadataFactory; + +/** + * @author zhangyingjie + * @date 2022/1/14 + **/ +public class RegistryService { + private PDConfig pdConfig; + private DiscoveryMetaStore meta; + + public RegistryService(PDConfig config){ + this.pdConfig = config; + meta = MetadataFactory.newDiscoveryMeta(config); + } + + public void register(NodeInfo nodeInfo, int outTimes) throws PDException { + meta.register(nodeInfo, outTimes); + } + public NodeInfos getNodes(Query query) { + return meta.getNodes(query); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java new file mode 100644 index 0000000000..29f802bcdb --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java @@ -0,0 +1,276 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.springframework.stereotype.Service; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Service +public class StoreMonitorDataService { + + private static final String MONITOR_DATA_PREFIX = "SMD"; + private final PDConfig pdConfig; + private final KvService kvService; + /** + * the last timestamp of the store monitor data, + * used for determine the gap of store's heartbeat. 
+ */ + private final Map lastStoreStateTimestamp; + + public StoreMonitorDataService(PDConfig pdConfig) { + this.pdConfig = pdConfig; + this.kvService = new KvService(pdConfig); + this.lastStoreStateTimestamp = new HashMap<>(); + } + + /** + * save the store stats + * + * @param storeStats + */ + public void saveMonitorData(Metapb.StoreStats storeStats) throws PDException { + long storeId = storeStats.getStoreId(); + /** + * load the latest store timestamp when start up or alter leader + */ + if (!lastStoreStateTimestamp.containsKey(storeId)) { + long lastTimestamp = getLatestStoreMonitorDataTimeStamp(storeId); + log.debug("store id : {}, last timestamp :{}", storeId, lastTimestamp); + lastStoreStateTimestamp.put(storeId, lastTimestamp); + } + + long current = System.currentTimeMillis() / 1000; + long interval = this.pdConfig.getStore().getMonitorInterval(); + + // exceed the interval + if (current - lastStoreStateTimestamp.getOrDefault(storeId, 0L) >= interval) { + saveMonitorDataToDb(storeStats, current); + log.debug("store id: {}, system info:{}", storeId, + debugMonitorInfo(storeStats.getSystemMetricsList())); + lastStoreStateTimestamp.put(storeId, current); + } + } + + /** + * save the snapshot of store status + * + * @param storeStats store status + * @param ts, timestamp + * @return store status + * @throws PDException + */ + private void saveMonitorDataToDb(Metapb.StoreStats storeStats, long ts) throws PDException { + String key = getMonitorDataKey(storeStats.getStoreId(), ts); + log.debug("store id: {}, save monitor data info, ts:{}, my key:{}", storeStats.getStoreId(), + ts, key); + kvService.put(key, extractMetricsFromStoreStatus(storeStats)); + } + + public String debugMonitorInfo(List systemInfo) { + StringBuilder sb = new StringBuilder(); + sb.append("["); + for (Metapb.RecordPair pair : systemInfo) { + sb.append(pair.getKey()); + sb.append(":"); + sb.append(pair.getValue()); + sb.append(","); + } + sb.append("]"); + return sb.toString(); + } + + /** 
+ * get the historical monitor data by store id, by range(start, end) + * + * @param storeId store id + * @param start range start + * @param end range end + * @return list of store stats + */ + public Map getStoreMonitorData(long storeId, long start, long end) throws + PDException { + log.debug("get monitor data, store id:{}, start{}, end:{}", + storeId, + getMonitorDataKey(storeId, start), + getMonitorDataKey(storeId, end)); + return kvService.scanRange(getMonitorDataKey(storeId, start), + getMonitorDataKey(storeId, end)); + } + + /** + * for api service + * + * @param storeId + * @return + * @throws PDException + */ + public List> getStoreMonitorData(long storeId) throws PDException { + List> result = new LinkedList<>(); + long current = System.currentTimeMillis() / 1000; + long start = current - this.pdConfig.getStore().getRetentionPeriod(); + + try { + for (Map.Entry entry : getStoreMonitorData(storeId, start, + current).entrySet()) { + String[] arr = + entry.getKey().split(String.valueOf(MetadataKeyHelper.getDelimiter())); + Map map = new HashMap(); + long timestamp = Long.parseLong(arr[arr.length - 1]); + map.put("ts", timestamp); + for (String pair : entry.getValue().split(",")) { + String[] p = pair.split(":"); + if (p.length == 2) { + map.put(p[0], Long.parseLong(p[1])); + } + } + result.add(map); + } + result.sort((o1, o2) -> o1.get("ts").compareTo(o2.get("ts"))); + } catch (PDException e) { + log.error(e.getMessage()); + } + return result; + } + + /** + * for api service, export txt + * + * @param storeId + * @return + * @throws PDException + */ + public String getStoreMonitorDataText(long storeId) throws PDException { + + List> result = getStoreMonitorData(storeId); + StringBuilder sb = new StringBuilder(); + if (result.size() > 0) { + DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + Map lastRow = result.get(result.size() - 1); + List columns = new ArrayList<>(); + // construct columns, ts + sorted keys + columns.add("ts"); 
+ columns.addAll(lastRow.keySet().stream() + .filter(x -> !"ts".equals(x)) + .sorted() + .collect(Collectors.toList())); + sb.append(String.join(",", columns).replace("\"", "")).append("\r\n"); + for (Map row : result) { + for (String key : columns) { + // ts + , + ... + if ("ts".equals(key)) { + // format ts + sb.append(dtf.format( + LocalDateTime.ofInstant(Instant.ofEpochSecond(row.get(key)), + ZoneId.systemDefault()))); + continue; + } else { + sb.append(",").append(row.getOrDefault(key, 0L)); + } + } + sb.append("\r\n"); + } + } + return sb.toString(); + } + + /** + * remove the monitor data of the store that before till(not include) + * + * @param storeId store id + * @param till expire time + * @return affect rows + */ + public int removeExpiredMonitorData(long storeId, long till) throws PDException { + String keyStart = getMonitorDataKey(storeId, 1); + String keyEnd = getMonitorDataKey(storeId, till); + int records = 0; + for (String key : kvService.scanRange(keyStart, keyEnd).keySet()) { + kvService.delete(key); + log.debug("remove monitor data, key: {}", key); + records += 1; + } + return records; + } + + /** + * get the latest timestamp of the store monitor data + * + * @param storeId + * @return timestamp(by seconds) + */ + public long getLatestStoreMonitorDataTimeStamp(long storeId) { + long maxId = 0L; + long current = System.currentTimeMillis() / 1000; + long start = current - this.pdConfig.getStore().getMonitorInterval(); + String keyStart = getMonitorDataKey(storeId, start); + String keyEnd = getMonitorDataKey(storeId, current); + try { + for (String key : kvService.scanRange(keyStart, keyEnd).keySet()) { + String[] arr = key.split(String.valueOf(MetadataKeyHelper.getDelimiter())); + maxId = Math.max(maxId, Long.parseLong(arr[arr.length - 1])); + } + } catch (PDException e) { + } + return maxId; + } + + private String getMonitorDataKey(long storeId, long ts) { + String builder = MONITOR_DATA_PREFIX + + MetadataKeyHelper.getDelimiter() + + storeId + 
+ MetadataKeyHelper.getDelimiter() + + ts; + return builder; + } + + private String extractMetricsFromStoreStatus(Metapb.StoreStats storeStats) { + List list = new ArrayList<>(); + for (Metapb.RecordPair pair : storeStats.getSystemMetricsList()) { + list.add("\"" + pair.getKey() + "\":" + pair.getValue()); + } + return String.join(",", list); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java new file mode 100644 index 0000000000..71f4f6c8d9 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -0,0 +1,1668 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +package org.apache.hugegraph.pd; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== +import java.util.stream.Collectors; + +import org.apache.commons.lang3.StringUtils; + +import org.apache.hugegraph.pd.common.Consts; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.GraphMode; +import org.apache.hugegraph.pd.grpc.Metapb.GraphModeReason; +import org.apache.hugegraph.pd.grpc.Metapb.GraphState; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; +import org.apache.hugegraph.pd.listener.ShardGroupStatusListener; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.StoreInfoMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import com.google.gson.Gson; + +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; 
+import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.GraphMode; +import org.apache.hugegraph.pd.grpc.Metapb.GraphModeReason; +import org.apache.hugegraph.pd.grpc.Metapb.GraphState; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.StoreInfoMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; + +import com.google.gson.Gson; + +import lombok.extern.slf4j.Slf4j; + +/** + * Hg Store registration and keep-alive management + */ +@Slf4j +public class StoreNodeService { + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== + // Store状态监听 + private List statusListeners; + private List shardGroupStatusListeners; + + private PartitionService partitionService; + @Getter + private StoreInfoMeta storeInfoMeta; + @Getter + private TaskInfoMeta taskInfoMeta; + private Random random = new Random(System.currentTimeMillis()); + private Map clusterStats = new ConcurrentHashMap<>(); + private KvService kvService; + private ConfigService configService; + private PDConfig pdConfig; + private static Metapb.ClusterStats statsNotReady = + Metapb.ClusterStats.newBuilder().setState(Metapb.ClusterState.Cluster_Not_Ready).build(); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + private static final Long STORE_HEART_BEAT_INTERVAL = 30000L; + private static final String graphSpaceConfPrefix = "HUGEGRAPH/hg/GRAPHSPACE/CONF/"; + private final List statusListeners; + private final List shardGroupStatusListeners; + private final StoreInfoMeta storeInfoMeta; + private final TaskInfoMeta taskInfoMeta; + private final Random random = new 
Random(System.currentTimeMillis()); + private final KvService kvService; + private final ConfigService configService; + private final PDConfig pdConfig; + private PartitionService partitionService; + private final Runnable quotaChecker = () -> { + try { + getQuota(); + } catch (Exception e) { + log.error( + "obtaining and sending graph space quota information with error: ", + e); + } + }; + private Metapb.ClusterStats clusterStats; + + public StoreNodeService(PDConfig config) { + this.pdConfig = config; + storeInfoMeta = MetadataFactory.newStoreInfoMeta(pdConfig); + taskInfoMeta = MetadataFactory.newTaskInfoMeta(pdConfig); + shardGroupStatusListeners = Collections.synchronizedList(new ArrayList<>()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + statusListeners = Collections.synchronizedList(new ArrayList()); + clusterStats = Metapb.ClusterStats.newBuilder() + .setState(Metapb.ClusterState.Cluster_Not_Ready) + .setTimestamp(System.currentTimeMillis()) + .build(); + kvService = new KvService(pdConfig); + configService = new ConfigService(pdConfig); +======== + statusListeners = Collections.synchronizedList(new ArrayList<>()); + configService = new ConfigService(pdConfig); + kvService = new KvService(pdConfig); + + try { + for (var group: configService.getAllStoreGroup()) { + clusterStats.put(group.getGroupId(), getDefaultClusterStats()); + } + } catch (PDException e) { + log.error("init exception: ", e); + } + + } + + private Metapb.ClusterStats getDefaultClusterStats() { + return Metapb.ClusterStats.newBuilder() + .setState(Metapb.ClusterState.Cluster_Not_Ready) + .setTimestamp(System.currentTimeMillis()) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + public void init(PartitionService partitionService) { + this.partitionService = partitionService; +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + if (old != null && old.getState() != partition.getState()) { + try { + List partitions = + partitionService.getPartitionById(partition.getId()); + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + for (Metapb.Partition pt : partitions) { + if (pt.getState().getNumber() > state.getNumber()) { + state = pt.getState(); + } + } + updateShardGroupState(partition.getId(), state); + + for (Metapb.ShardGroup group : getShardGroups()) { + if (group.getState().getNumber() > state.getNumber()) { + state = group.getState(); + } + } + updateClusterStatus(state); + } catch (PDException e) { + log.error("onPartitionChanged exception: ", e); + } + } + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); +======== +// partitionService.addStatusListener(new PartitionStatusListener() { +// @Override +// public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { +// if (old != null && old.getState() != partition.getState()) { +// // 状态改变,重置集群状态 +// try { +// List partitions = partitionService.getPartitionById(partition.getId()); +// Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; +// for(Metapb.Partition pt : partitions){ +// if ( pt.getState().getNumber() > state.getNumber()) { +// state = pt.getState(); +// } +// } +// updateShardGroupState(partition.getId(), state); +// +// for(Metapb.ShardGroup group : getShardGroups()){ +// if ( group.getState().getNumber() > state.getNumber()) +// state = group.getState(); +// } +// +// updateClusterStatus(state); +// } catch (PDException e) { +// log.error("onPartitionChanged exception: ", e); +// } +// } +// } +// +// @Override +// public void onPartitionRemoved(Metapb.Partition partition) { 
+// +// } +// }); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + /** + * Whether the cluster is ready or not + * + * @return + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public boolean isOK() { + return this.clusterStats.getState().getNumber() < + Metapb.ClusterState.Cluster_Offline.getNumber(); +======== + public boolean isOK(int storeGroup){ + if (! this.clusterStats.containsKey(storeGroup)) { + return false; + } + return this.clusterStats.get(storeGroup).getState().getNumber() < + Metapb.ClusterState.Cluster_Offline.getNumber(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + /** + * Store registration, record the IP address of the Store, and the first registration needs + * to generate a store_ID + * + * @param store + */ + public Metapb.Store register(Metapb.Store store) throws PDException { + if (store.getId() == 0) { + // Initial registration, generate a new ID, and ensure that the ID is not duplicated. 
+ store = newStoreNode(store); + } + + if (!storeInfoMeta.storeExists(store.getId())) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + log.error("Store id {} does not belong to this PD, address = {}", store.getId(), + store.getAddress()); + // storeId does not exist, an exception is thrown + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d doest not exist.", store.getId())); +======== + log.error("Store id {} does not belong to this PD, address = {}", store.getId(), store.getAddress()); + // storeId不存在,抛出异常 + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d doest not exist.", store.getId())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + // If the store status is Tombstone, the registration is denied. + Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); + if (lastStore.getState() == Metapb.StoreState.Tombstone) { + log.error("Store id {} has been removed, Please reinitialize, address = {}", +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + store.getId(), store.getAddress()); + // storeId does not exist, an exception is thrown + throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store id %d has been removed. %s", store.getId(), + store.getAddress())); +======== + store.getId(), store.getAddress()); + // storeId不存在,抛出异常 + throw new PDException(ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store id %d has been removed. 
%s", store.getId(), store.getAddress())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + // offline or up, or in the initial activation list, go live automatically + Metapb.StoreState storeState = lastStore.getState(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up + || inInitialStoreList(store)) { +======== + if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up || inInitialStoreList(store)){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + storeState = Metapb.StoreState.Up; + } else { + storeState = Metapb.StoreState.Pending; + } + + store = Metapb.Store.newBuilder(lastStore) + .setAddress(store.getAddress()) + .setRaftAddress(store.getRaftAddress()) + .setDataVersion(store.getDataVersion()) + .setDeployPath(store.getDeployPath()) + .setVersion(store.getVersion()) + .setDataPath(store.getDataPath()) + .setState(storeState).setCores(store.getCores()) + .clearLabels().addAllLabels(store.getLabelsList()) + .setLastHeartbeat(System.currentTimeMillis()).build(); + + long current = System.currentTimeMillis(); + boolean raftChanged = false; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // On-line status Raft Address there has been a change + if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && + storeState == Metapb.StoreState.Up) { + // If the time interval is too short and the raft changes, it is considered an + // invalid store + if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8) { + throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DUPLICATE_VALUE, + String.format("Store id %d may be duplicate. 
addr: %s", + store.getId(), store.getAddress())); + } else if (current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2) { + // It is considered that a change has occurred +======== + // 上线状态的Raft Address 发生了变更 + if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && storeState == Metapb.StoreState.Up) { + // 时间间隔太短,而且raft有变更,则认为是无效的store + if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8){ + throw new PDException(ErrorType.STORE_PROHIBIT_DUPLICATE_VALUE, + String.format("Store id %d may be duplicate. addr: %s", store.getId(), store.getAddress())); + } else if(current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2 ) { + // 认为发生了变更 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + raftChanged = true; + } else { + // Wait for the next registration + return Metapb.Store.newBuilder(store).setId(0L).build(); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // Store information +======== + // 兼容性处理,如果在初始化列表中,则自动插入storeGroup + if (inInitialStoreList(store) && ! 
isStoreHasStoreGroup(store.getId())) { + int groupId = this.pdConfig.getInitialStoreGroup(store.getAddress()); + updateStoreGroupRelation(store.getId(), groupId); + } + + // 存储store信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + storeInfoMeta.updateStore(store); + if (storeState == Metapb.StoreState.Up) { + // Update the store active status + storeInfoMeta.keepStoreAlive(store); + onStoreStatusChanged(store, Metapb.StoreState.Offline, Metapb.StoreState.Up); + checkStoreStatus(storeInfoMeta.getStoreGroupByStoreId(store.getId())); + } + + // Wait for the store information to be saved before sending the changes + if (raftChanged) { + onStoreRaftAddressChanged(store); + } + + log.info("Store register, id = {} {}", store.getId(), store); + return store; + } + + private boolean inInitialStoreList(Metapb.Store store) { + return this.pdConfig.getInitialStoreMap().containsKey(store.getAddress()); + } + + /** + * Creates a new store object + * + * @param store + * @return + * @throws PDException + */ + private synchronized Metapb.Store newStoreNode(Metapb.Store store) throws PDException { + long id = random.nextLong() & Long.MAX_VALUE; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + while (id == 0 || storeInfoMeta.storeExists(id)) { +======== + while( id == 0 || storeInfoMeta.storeExists(id) ) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + id = random.nextLong() & Long.MAX_VALUE; + } + store = Metapb.Store.newBuilder(store) + .setId(id) + .setState(Metapb.StoreState.Pending) + .setStartTimestamp(System.currentTimeMillis()).build(); + storeInfoMeta.updateStore(store); + return store; + } + + /** + * Returns Store information based on store_id + * + * @param id + * @return + * @throws PDException + */ + public Metapb.Store getStore(long id) throws PDException { + Metapb.Store store = 
storeInfoMeta.getStore(id); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + if (store == null) { + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %x doest not exist.", id)); +======== + if ( store == null ) { + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %x doest not exist.", id)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + return store; + } + + /** + * Update the store information, detect the change of store status, and notify Hugestore + */ + public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), + store.getAddress(), store.getState()); +======== + log.info("updateStore storeId: {}, address: {}, state: {}", + store.getId(), store.getAddress(), store.getState()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); + if (lastStore == null) { + return null; + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + Metapb.Store.Builder builder = + Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); +======== + + Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + store = builder.mergeFrom(store).build(); + if (store.getState() == Metapb.StoreState.Tombstone) { + List activeStores = getStores(getStoreInfoMeta().getStoreGroupByStoreId(store.getId())); + if (lastStore.getState() == Metapb.StoreState.Up +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); +======== + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + } + + storeInfoMeta.updateStore(store); + if (store.getState() != Metapb.StoreState.Unknown && + store.getState() != lastStore.getState()) { + // If you want to take the store offline + if (store.getState() == Metapb.StoreState.Exiting) { + if (lastStore.getState() == Metapb.StoreState.Exiting) { + // If it is already in the offline state, no further processing will be made + return lastStore; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + List activeStores = this.getActiveStores(); + Map storeMap = new HashMap<>(); + activeStores.forEach(s -> { + storeMap.put(s.getId(), s); + }); + // If the store is offline, delete it directly from active, and if the store is + // online, temporarily delete it from active, and then delete it when the status + // is set to Tombstone +======== +// List activeStores = this.getActiveStores(getStoreInfoMeta().getStoreGroup(store.getId())); +// Map storeMap = new HashMap<>(); +// activeStores.forEach(s -> { +// storeMap.put(s.getId(), s); +// }); + + var storeMap = getActiveStoresByStoreGroup(getStoreInfoMeta().getStoreGroupByStoreId(store.getId())) + .stream() + .collect(Collectors.toMap(Metapb.Store::getId, store1 -> store1)); + + //如果store已经离线,直接从活跃中删除,如果store在线,暂时不从活跃中删除,等把状态置成Tombstone的时候再删除 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + if (!storeMap.containsKey(store.getId())) { + log.info("updateStore removeActiveStores store {}", store.getId()); + storeInfoMeta.removeActiveStore(store); + } + storeTurnoff(store); + } else if (store.getState() == Metapb.StoreState.Offline) { + // Monitor that the store has gone offline and is removed from the active + storeInfoMeta.removeActiveStore(store); + } else if (store.getState() == Metapb.StoreState.Tombstone) { + // When the status changes, the store is shut down, the shardGroup is modified, + // and the replica is migrated + log.info("updateStore removeActiveStores store {}", store.getId()); + storeInfoMeta.removeActiveStore(store); + // Storage goes offline + storeTurnoff(store); + } else if (store.getState() == Metapb.StoreState.Up) { + storeInfoMeta.keepStoreAlive(store); + checkStoreStatus(storeInfoMeta.getStoreGroupByStoreId(store.getId())); + } + onStoreStatusChanged(lastStore, lastStore.getState(), store.getState()); + } + return store; + } + + /** + * The shard of the shardGroup is reassigned + * + * @param store + * @throws PDException + */ + public synchronized void storeTurnoff(Metapb.Store store) throws PDException { + // Traverse ShardGroup,redistribution + for (Metapb.ShardGroup group : getShardGroupsByStore(store.getId())) { + Metapb.ShardGroup.Builder builder = Metapb.ShardGroup.newBuilder(group); + builder.clearShards(); + group.getShardsList().forEach(shard -> { + if (shard.getStoreId() != store.getId()) { + builder.addShards(shard); + } + }); + reallocShards(builder.build()); + } + } + + /** + * Returns stores information based on the graph name, and if graphName is empty, all store + * information is returned + * + * @throws PDException + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public List getStores() throws PDException { + return storeInfoMeta.getStores(null); + } + + public List 
getStores(String graphName) throws PDException { + return storeInfoMeta.getStores(graphName); +======== + public List getStores() throws PDException{ + return storeInfoMeta.getAllStores(); + } + + public List getStores(String graphName) throws PDException { + Metapb.Graph graph = partitionService.getGraph(graphName); + return graph == null ? getStores() : getStoresByStoreGroup(graph.getStoreGroupId()); + } + + public List getStores(int storeGroupId) throws PDException{ + Set set = storeInfoMeta.getStoreIdsByGroup(storeGroupId); + return storeInfoMeta.getAllStores().stream() + .filter(store -> set.contains(store.getId())) + .collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + public List getStoreStatus(boolean isActive) throws PDException { + return storeInfoMeta.getStoreStatus(isActive); + } + + public List getShardGroups() throws PDException { + return storeInfoMeta.getShardGroups(); + } + + public List getShardGroups(int storeGroup) throws PDException { + Set storeIds = storeInfoMeta.getStoreIdsByGroup(storeGroup); + return storeInfoMeta.getShardGroups().stream().filter(shardGroup -> { + for (var shard : shardGroup.getShardsList()) { + if (storeIds.contains(shard.getStoreId())) { + return true; + } + } + return false; + }).collect(Collectors.toList()); + } + + public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { + return storeInfoMeta.getShardGroup(groupId); + } + + public List getShardList(int groupId) throws PDException { + var shardGroup = getShardGroup(groupId); + if (shardGroup != null) { + return shardGroup.getShardsList(); + } + return new ArrayList<>(); + } + + public List getShardGroupsByStore(long storeId) throws PDException { + List shardGroups = new ArrayList<>(); + storeInfoMeta.getShardGroups().forEach(shardGroup -> { + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() == storeId) { + shardGroups.add(shardGroup); 
+ } + }); + }); + return shardGroups; + } + + /** + * Returns the active store + * + * @param graphName + * @return + * @throws PDException + */ + public List getActiveStores(String graphName) throws PDException { + // todo: + Metapb.Graph graph = partitionService.getGraph(graphName); + return graph == null ? List.of() : getActiveStoresByStoreGroup(graph.getStoreGroupId()); + } + + public List getActiveStores() throws PDException { + return storeInfoMeta.getActiveStores(); + } + + public List getActiveStoresByStoreGroup(int storeGroupId) throws PDException { + Set ids = storeInfoMeta.getStoreIdsByGroup(storeGroupId); + return storeInfoMeta.getActiveStores() + .stream() + .filter(store -> ids.contains(store.getId())) + .collect(Collectors.toList()); + } + + public List getActiveStoresByPartition(int partitionId) throws PDException { + var shardGroup = getShardGroup(partitionId); + if (shardGroup != null) { + return shardGroup.getShardsList().stream().map(Metapb.Shard::getStoreId).collect(Collectors.toList()); + } + return List.of(); + } + + public List getTombStores() throws PDException { + List stores = new ArrayList<>(); + for (Metapb.Store store : this.getStores()) { + if (store.getState() == Metapb.StoreState.Tombstone) { + stores.add(store); + } + } + return stores; + } + + public long removeStore(Long storeId) throws PDException { + return storeInfoMeta.removeStore(storeId); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + * todo : New logic + * Assign a store to the partition and decide how many peers to allocate according to the + * configuration of the graph + * After allocating all the shards, save the ShardGroup object (store does not change, only + * executes once) +======== + * todo : 新逻辑 + * 给partition分配store,根据图的配置,决定分配几个peer + * 分配完所有的shards,保存ShardGroup对象(store不变动,只执行一次) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + */ + 
public synchronized List allocShards(Metapb.Graph graph, int partId) throws + PDException { + // Multiple graphs share raft grouping, so assigning shard only depends on partitionId. + // The number of partitions can be set based on the size of the data, but the total + // number cannot exceed the number of raft groups + if (storeInfoMeta.getShardGroup(partId) == null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // Get active store key + List stores = storeInfoMeta.getActiveStores(); + + if (stores.size() == 0) { + throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, + "There is no any online store"); + } + + if (stores.size() < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); +======== + // 获取活跃的store key + // 根据 partionID计算store + List stores = storeInfoMeta.getActiveStores(graph.getStoreGroupId()); + + if (stores.isEmpty()) { + throw new PDException(ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); + } + + var minStoreCount = Math.max(pdConfig.getMinStoreCount(), configService.getPDConfig().getShardCount()); + + if (stores.size() < minStoreCount) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + minStoreCount); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + // todo: 根据graph 配置的store group,获取 shard count + int shardCount = pdConfig.getPartition().getShardCount(); + shardCount = Math.min(shardCount, stores.size()); + // Two shards could not elect a leader + // It cannot be 0 + + if (shardCount == 2 || shardCount < 1) { + shardCount = 1; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // All ShardGroups are created at one time to ensure that the initial groupIDs are + // 
orderly and easy for humans to read + for (int groupId = 0; groupId < pdConfig.getConfigService().getPartitionCount(); + groupId++) { + int storeIdx = groupId % stores.size(); // Assignment rules, simplified to modulo +======== + // todo: 获取partition count by group + var partitionCount = pdConfig.getConfigService().getPartitionCount(graph.getStoreGroupId()); + int baseId = partId / Consts.PARTITION_GAP * Consts.PARTITION_GAP; + // 一次创建完所有的ShardGroup,保证初始的groupID有序,方便人工阅读 + for (int groupId = 0; groupId >>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + List shards = new ArrayList<>(); + for (int i = 0; i < shardCount; i++) { + Metapb.Shard shard = + Metapb.Shard.newBuilder().setStoreId(stores.get(storeIdx).getId()) + .setRole(i == 0 ? Metapb.ShardRole.Leader : + Metapb.ShardRole.Follower) // + .build(); + shards.add(shard); + storeIdx = (storeIdx + 1) >= stores.size() ? 0 : ++storeIdx; // Sequential + } + + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + .setId(groupId) + .setState( + Metapb.PartitionState.PState_Normal) + .addAllShards(shards).build(); +======== + .setId(groupId + baseId) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + + // new group + storeInfoMeta.updateShardGroup(group); + updateShardGroupCache(group); + onShardGroupStatusChanged(null, group); + log.info("alloc shard group: id {}", groupId + baseId); + } + } + + return storeInfoMeta.getShardGroup(partId).getShardsList(); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + * Based on the shard_count of the graph, reallocate shards + * Send change shard + */ + public synchronized List reallocShards(Metapb.ShardGroup 
shardGroup) throws + PDException { + List stores = storeInfoMeta.getActiveStores(); + + if (stores.size() == 0) { + throw new PDException(Pdpb.ErrorType.NO_ACTIVE_STORE_VALUE, + "There is no any online store"); + } + + if (stores.size() < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); +======== + * todo : 新逻辑 + * 根据graph的shard_count,重新分配shard + * 发送变更change shard指令 + */ + public synchronized List reallocShards(Metapb.ShardGroup shardGroup) throws PDException { + // todo:检查 shard group在哪个store group里面, 以及 shard group 对应的partition count + // todo: store group 内部分组 + int storeGroup = getShardGroupBelongsToStoreGroup(shardGroup); + List stores = storeInfoMeta.getActiveStores(storeGroup); + + if (stores.isEmpty()) { + throw new PDException(ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); + } + + if (stores.size() < pdConfig.getMinStoreCount()) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + // todo: check partition count by store group + int shardCount = pdConfig.getPartition().getShardCount(); + shardCount = Math.min(shardCount, stores.size()); + if (shardCount == 2 || shardCount < 1) { + // Two shards could not elect a leader + // It cannot be 0 + shardCount = 1; + } + + List shards = new ArrayList<>(shardGroup.getShardsList()); + + if (shardCount > shards.size()) { + // Need to add shards + log.info("reallocShards ShardGroup {}, add shards from {} to {}", + shardGroup.getId(), shards.size(), shardCount); + int storeIdx = shardGroup.getId() % stores.size(); + for (int addCount = shardCount - shards.size(); addCount > 0; ) { + // Check if it already exists + if (!isStoreInShards(shards, stores.get(storeIdx).getId())) { + 
Metapb.Shard shard = Metapb.Shard.newBuilder() + .setStoreId(stores.get(storeIdx).getId()) + .build(); + shards.add(shard); + addCount--; + } + storeIdx = (storeIdx + 1) >= stores.size() ? 0 : ++storeIdx; + } + } else if (shardCount < shards.size()) { + // Need to reduce shard + log.info("reallocShards ShardGroup {}, remove shards from {} to {}", + shardGroup.getId(), shards.size(), shardCount); + + int subCount = shards.size() - shardCount; + Iterator iterator = shards.iterator(); + while (iterator.hasNext() && subCount > 0) { + if (iterator.next().getRole() != Metapb.ShardRole.Leader) { + iterator.remove(); + subCount--; + } + } + } else { + return shards; + } + + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder(shardGroup) + .clearShards() + .addAllShards(shards).build(); + storeInfoMeta.updateShardGroup(group); + updateShardGroupCache(group); + // change shard group + // onShardGroupStatusChanged(shardGroup, group); + + var partitions = partitionService.getPartitionById(shardGroup.getId()); + if (!partitions.isEmpty()) { + // send one message, change shard is regardless with partition/graph + partitionService.fireChangeShard(partitions.get(0), shards, + ConfChangeType.CONF_CHANGE_TYPE_ADJUST); + } + + log.info("reallocShards ShardGroup {}, shards: {}", group.getId(), group.getShardsList()); + return shards; + } + + /** + * According to the number of partitions, distribute group shard + * + * @param groups list of (partition id, count) + * @return total groups + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public synchronized int splitShardGroups(List> groups) throws + PDException { + int sum = groups.stream().map(pair -> pair.getValue()).reduce(0, Integer::sum); + // shard group is too big + if (sum > getActiveStores().size() * pdConfig.getPartition().getMaxShardsPerStore()) { + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count"); 
+======== + public synchronized int splitShardGroups(List> groups) throws PDException { + // 1. 检查所有的groups是否属于一个 store group + Set storeGroups = new HashSet<>(); + + for (var group : groups) { + var shardGroup = storeInfoMeta.getShardGroup(group.getKey()); + if (shardGroup == null){ + throw new PDException(ErrorType.SHARD_GROUPS_NOT_EXISTS); + } + storeGroups.add(getShardGroupBelongsToStoreGroup(shardGroup)); + } + assert storeGroups.size() == 1; + + int storeGroup = storeGroups.iterator().next(); + int sum = groups.stream().map(KVPair::getValue).reduce(0, Integer::sum); + + // 2. 检查split后的count, 增加的 + 原有的 + int newCount = (sum - groups.size()) + getShardGroups(storeGroup).size(); + + // shard group 太大 + if (newCount > getActiveStoresByStoreGroup(storeGroup).size() * pdConfig.getPartition().getMaxShardsPerStore()){ + throw new PDException(ErrorType.Too_Many_Partitions_Per_Store); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + partitionService.splitPartition(groups); + + return newCount; + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + * Alloc shard group, prepare for the split + * +======== + * 分配shard group,为分裂做准备 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + * @param + * @return true + * @throws PDException + */ + private boolean isStoreInShards(List shards, long storeId) { + AtomicBoolean exist = new AtomicBoolean(false); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + shards.forEach(s -> { + if (s.getStoreId() == storeId) { +======== + shards.forEach(s->{ + if (s.getStoreId() == storeId ) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + exist.set(true); + } + }); + return exist.get(); + } + + /** + * update shard group and cache. 
+ * send shard group change message. + * + * @param groupId : shard group + * @param shards : shard lists + * @param version: term version, ignored if less than 0 + * @param confVersion : conf version, ignored if less than 0 + * @return + */ + public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List shards, +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + long version, long confVersion) throws + PDException { +======== + long version, long confVersion) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); + + if (group == null) { + return null; + } + + var builder = Metapb.ShardGroup.newBuilder(group); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + if (version >= 0) { + builder.setVersion(version); + } + + if (confVersion >= 0) { + builder.setConfVer(confVersion); + } + + var newGroup = builder.clearShards().addAllShards(shards).build(); + + storeInfoMeta.updateShardGroup(newGroup); + partitionService.updateShardGroupCache(newGroup); + onShardGroupStatusChanged(group, newGroup); + log.info("Raft {} updateShardGroup {}", groupId, newGroup); +======== + if (version >= 0){ + builder.setVersion(version); + } + + if (confVersion >= 0){ + builder.setConfVer(confVersion); + } + + var newGroup = builder.clearShards() .addAllShards(shards) .build(); + + storeInfoMeta.updateShardGroup(newGroup); + updateShardGroupCache(newGroup); + onShardGroupStatusChanged(group, newGroup); + // log.info("Raft {} updateShardGroup {}", groupId, newGroup); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + return group; + } + + /** + * Notify the Store to rebuild the shard group + * + * @param groupId raft group id + * @param shards shard list: If it is empty, 
delete the corresponding one partition engine + */ + public void shardGroupOp(int groupId, List shards) throws PDException { + + var shardGroup = getShardGroup(groupId); + + if (shardGroup == null) { + return; + } + + var newGroup = shardGroup.toBuilder().clearShards().addAllShards(shards).build(); + if (shards.isEmpty()) { + var partitions = partitionService.getPartitionById(groupId); + for (var partition : partitions) { + partitionService.removePartition(partition.getGraphName(), groupId); + } + deleteShardGroup(groupId); + } + + onShardGroupOp(newGroup); + } + + /** + * Delete shard group + * + * @param groupId shard group id + */ + public synchronized void deleteShardGroup(int groupId) throws PDException { + Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); + int storeGroupId = getShardGroupBelongsToStoreGroup(group); + + if (group != null) { + storeInfoMeta.deleteShardGroup(groupId); + } + + onShardGroupStatusChanged(group, null); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // Fix the number of partitions for the store. (Result from partition merge) + var shardGroups = getShardGroups(); + if (shardGroups != null) { + var count1 = pdConfig.getConfigService().getPDConfig().getPartitionCount(); + var maxGroupId = + getShardGroups().stream().map(Metapb.ShardGroup::getId).max(Integer::compareTo); + if (maxGroupId.get() < count1) { + pdConfig.getConfigService().setPartitionCount(maxGroupId.get() + 1); +======== + // 修正store的分区数. 
(分区合并导致) + var shardGroups = getShardGroups(storeGroupId); + if (shardGroups != null) { + var count1 = pdConfig.getConfigService().getPartitionCount(storeGroupId); + var maxGroupId = getShardGroups(storeGroupId) + .stream().map(Metapb.ShardGroup::getId) + .max(Integer::compareTo); + // 考虑分组的影响 + var groupId2 = maxGroupId.get() % Consts.PARTITION_GAP; + if (groupId2 < count1) { + configService.setPartitionCount(storeGroupId, groupId2 + 1); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + } + + // 没有数据的partition,可能上报不上来 + var partitions = partitionService.getPartitions().stream() + .filter(partition -> partition.getId() == groupId) + .collect(Collectors.toList()); + + for (var partition : partitions) { + partitionService.removePartition(partition.getGraphName(), groupId); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws + PDException { + Metapb.ShardGroup shardGroup = storeInfoMeta.getShardGroup(groupId) + .toBuilder() + .setState(state).build(); + storeInfoMeta.updateShardGroup(shardGroup); + partitionService.updateShardGroupCache(shardGroup); +======== + // todo : update cluster state + public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws PDException { + Metapb.ShardGroup shardGroup = storeInfoMeta.getShardGroup(groupId); + + if (state != shardGroup.getState()) { + var newShardGroup = shardGroup.toBuilder().setState(state).build(); + storeInfoMeta.updateShardGroup(newShardGroup); + + updateShardGroupCache(newShardGroup); + + log.debug("update shard group {} state: {}", groupId, state); + } + + // 检查集群的状态 + // todo : 更明确的集群状态定义 + Metapb.PartitionState clusterState = Metapb.PartitionState.PState_None; + for(Metapb.ShardGroup group : getShardGroups()){ + if (group.getState().getNumber() > 
state.getNumber()) { + clusterState = group.getState(); + } + } + + var storeGroupId = getShardGroupBelongsToStoreGroup(shardGroup); + updateClusterStatus(storeGroupId, clusterState); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + /** + * Receive the heartbeat of the Store + * + * @param storeStats + * @throws PDException + */ + public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDException { + this.storeInfoMeta.updateStoreStats(storeStats); + Metapb.Store lastStore = this.getStore(storeStats.getStoreId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + if (lastStore == null) { + // store does not exist + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist.", + storeStats.getStoreId())); + } + if (lastStore.getState() == Metapb.StoreState.Tombstone) { + throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format( + "Store id %d is useless since it's state is Tombstone", + storeStats.getStoreId())); + } + Metapb.Store nowStore; + // If you are going to take the store offline + if (lastStore.getState() == Metapb.StoreState.Exiting) { + List activeStores = this.getActiveStores(); + Map storeMap = new HashMap<>(); + activeStores.forEach(store -> { + storeMap.put(store.getId(), store); + }); + // If the partition of the offline store is 0, it means that the migration has been + // completed and can be taken offline, if it is not 0, the migration is still in + // progress and you need to wait + if (storeStats.getPartitionCount() > 0 && + storeMap.containsKey(storeStats.getStoreId())) { +======== + if (lastStore == null){ + //store不存在 + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist.", storeStats.getStoreId())); + } + if (lastStore.getState() == Metapb.StoreState.Tombstone){ + throw new 
PDException(ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store id %d is useless since it's state is Tombstone", + storeStats.getStoreId())); + } + Metapb.Store nowStore; + // 如果正在做store下线操作 + if (lastStore.getState() == Metapb.StoreState.Exiting){ + var storeMap = this.getActiveStoresByStoreGroup(storeInfoMeta.getStoreGroupByStoreId(lastStore.getId())) + .stream().collect(Collectors.toMap(Metapb.Store::getId, store -> store)); + + // 下线的store的分区为0,说明已经迁移完毕,可以下线,如果非0,则迁移还在进行,需要等待 + if (storeStats.getPartitionCount() > 0 && storeMap.containsKey(storeStats.getStoreId())){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + nowStore = Metapb.Store.newBuilder(lastStore) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Exiting).build(); + this.storeInfoMeta.updateStore(nowStore); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + return this.clusterStats; + } else { +======== + }else { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + nowStore = Metapb.Store.newBuilder(lastStore) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Tombstone).build(); + this.storeInfoMeta.updateStore(nowStore); + storeInfoMeta.removeActiveStore(nowStore); + } + } else if (lastStore.getState() == Metapb.StoreState.Pending) { + nowStore = Metapb.Store.newBuilder(lastStore) + .setStats(storeStats) + .setLastHeartbeat(System.currentTimeMillis()) + .setState(Metapb.StoreState.Pending).build(); + this.storeInfoMeta.updateStore(nowStore); + } else { + if (lastStore.getState() == Metapb.StoreState.Offline) { + this.updateStore( + Metapb.Store.newBuilder(lastStore).setState(Metapb.StoreState.Up).build()); + } + nowStore = Metapb.Store.newBuilder(lastStore) + .setState(Metapb.StoreState.Up) + .setStats(storeStats) 
+ .setLastHeartbeat(System.currentTimeMillis()).build(); + this.storeInfoMeta.updateStore(nowStore); + this.storeInfoMeta.keepStoreAlive(nowStore); + this.checkStoreStatus(storeInfoMeta.getStoreGroupByStoreId(lastStore.getId())); + } + + return this.clusterStats.get(getStoreGroupByStore(lastStore.getId())); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public synchronized Metapb.ClusterStats updateClusterStatus(Metapb.ClusterState state) { + this.clusterStats = clusterStats.toBuilder().setState(state).build(); + return this.clusterStats; + } + + public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state) { +======== + + public synchronized Metapb.ClusterStats updateClusterStatus(int storeGroupId, Metapb.ClusterState state) + throws PDException { + var stats = this.clusterStats.get(storeGroupId); + if (stats == null) { + var storeGroup = configService.getStoreGroup(storeGroupId); + if (storeGroup != null) { + this.clusterStats.put(storeGroupId, Metapb.ClusterStats.newBuilder().setState(state).build()); + } else { + throw new PDException(ErrorType.NOT_FOUND.getNumber(), "store group not exists"); + } + } else if (stats != null && stats.getState() != state) { + this.clusterStats.put(storeGroupId, stats.toBuilder().setState(state).build()); + } + return this.clusterStats.get(storeGroupId); + } + + public Metapb.ClusterStats updateClusterStatus(int storeGroupId, Metapb.PartitionState state) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + Metapb.ClusterState cstate = Metapb.ClusterState.Cluster_OK; + switch (state) { + case PState_Normal: + cstate = Metapb.ClusterState.Cluster_OK; + break; + case PState_Warn: + cstate = Metapb.ClusterState.Cluster_Warn; + break; + case PState_Fault: + cstate = Metapb.ClusterState.Cluster_Fault; + break; + case PState_Offline: + cstate = Metapb.ClusterState.Cluster_Offline; + 
break; + default: + cstate = Metapb.ClusterState.Cluster_Not_Ready; + } + return updateClusterStatus(storeGroupId, cstate); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public Metapb.ClusterStats getClusterStats() { + return this.clusterStats; +======== + public Metapb.ClusterStats getClusterStats(int storeGroup) { + return this.clusterStats.getOrDefault(storeGroup, statsNotReady); + } + + public Metapb.ClusterStats getClusterStats(long storeId) throws PDException { + return this.clusterStats.getOrDefault(storeInfoMeta.getStoreGroupByStoreId(storeId), statsNotReady); + } + + public Map getAllClusterStats() { + return this.clusterStats.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getState())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + /** + * Check the cluster health status + * Whether the number of active machines is greater than the minimum threshold + * The number of partition shards online has exceeded half + */ + public synchronized void checkStoreStatus(int storeGroup) { + Metapb.ClusterStats.Builder builder = Metapb.ClusterStats.newBuilder() + .setState( + Metapb.ClusterState.Cluster_OK); + try { + List activeStores = this.getActiveStoresByStoreGroup(storeGroup); + if (activeStores.size() < pdConfig.getMinStoreCount()) { + builder.setState(Metapb.ClusterState.Cluster_Not_Ready); + builder.setMessage("The number of active stores is " + activeStores.size() + + ", less than pd.initial-store-count:" + + pdConfig.getMinStoreCount()); + } + Map storeMap = new HashMap<>(); + activeStores.forEach(store -> { + storeMap.put(store.getId(), store); + }); + + if (builder.getState() == Metapb.ClusterState.Cluster_OK) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // Check whether the number of online shards for each partition is 
greater than half + for (Metapb.ShardGroup group : this.getShardGroups()) { +======== + // 检查每个分区的在线shard数量是否大于半数 + for (Metapb.ShardGroup group : this.getShardGroups(storeGroup)) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + int count = 0; + for (Metapb.Shard shard : group.getShardsList()) { + count += storeMap.containsKey(shard.getStoreId()) ? 1 : 0; + } + if (count * 2 < group.getShardsList().size()) { + builder.setState(Metapb.ClusterState.Cluster_Not_Ready); + builder.setMessage( + "Less than half of active shard, partitionId is " + group.getId()); + break; + } + } + } + + } catch (PDException e) { + log.error("StoreNodeService updateClusterStatus exception", e); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + this.clusterStats = builder.setTimestamp(System.currentTimeMillis()).build(); + if (this.clusterStats.getState() != Metapb.ClusterState.Cluster_OK) { +======== + + this.clusterStats.put(storeGroup, builder.setTimestamp(System.currentTimeMillis()).build()); + + if (this.clusterStats.get(storeGroup).getState() != Metapb.ClusterState.Cluster_OK) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + log.error("The cluster is not ready, {}", this.clusterStats); + } + } + + public void addStatusListener(StoreStatusListener listener) { + statusListeners.add(listener); + } + + protected void onStoreRaftAddressChanged(Metapb.Store store) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + log.info("onStoreRaftAddressChanged storeId = {}, new raft addr:", store.getId(), + store.getRaftAddress()); +======== + log.info("onStoreRaftAddressChanged storeId = {}, new raft address: {}", store.getId(), store.getRaftAddress()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + 
statusListeners.forEach(e -> { + e.onStoreRaftChanged(store); + }); + } + + public void addShardGroupStatusListener(ShardGroupStatusListener listener) { + shardGroupStatusListeners.add(listener); + } + + protected void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState stats) { + log.info("onStoreStatusChanged storeId = {} from {} to {}", store.getId(), old, stats); + statusListeners.forEach(e -> { + e.onStoreStatusChanged(store, old, stats); + }); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup) { + log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", group.getId(), group, + newGroup); + shardGroupStatusListeners.forEach(e -> e.onShardListChanged(group, newGroup)); +======== + protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup){ + if (group == null && newGroup == null) { + return; + } + + var id = group == null ? 
newGroup.getId() : group.getId(); + log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", id, group, newGroup); + shardGroupStatusListeners.forEach( e -> e.onShardListChanged(group, newGroup)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + protected void onShardGroupOp(Metapb.ShardGroup shardGroup) { + log.info("onShardGroupOp, group id: {}, shard group:{}", shardGroup.getId(), shardGroup); + shardGroupStatusListeners.forEach(e -> e.onShardListOp(shardGroup)); + } + + /** + * Check whether the current store can be discontinued + * If the number of active machines is less than or equal to the minimum threshold, they + * cannot be taken offline + * If the number of shards in the partition is not more than half, it cannot be offline + */ + public boolean checkStoreCanOffline(Metapb.Store currentStore) { + try { + Map storeMap = getActiveStoresByStoreGroup(getStoreGroupByStore(currentStore.getId())) + .stream().collect(Collectors.toMap(Metapb.Store::getId, store -> store)); + + if (storeMap.size() < pdConfig.getMinStoreCount()) { + return false; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + // Check whether the number of online shards for each partition is greater than half + for (Metapb.ShardGroup group : this.getShardGroups()) { +======== + // 检查每个分区的在线shard数量是否大于半数 + for (Metapb.ShardGroup group : this.getShardGroups(getStoreGroupByStore(currentStore.getId()))) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + int count = 0; + for (Metapb.Shard shard : group.getShardsList()) { + long storeId = shard.getStoreId(); + count += storeMap.containsKey(storeId) ? 
1 : 0; + } + if (count * 2 < group.getShardsList().size()) { + return false; + } + } + } catch (PDException e) { + log.error("StoreNodeService checkStoreCanOffline exception ", e); + return false; + } + + return true; + } + + /** + * Compaction on rocksdb on the store + * + * @param groupId + * @param tableName + * @return + */ + public synchronized void shardGroupsDbCompaction(int groupId, String tableName) throws + PDException { + + // Notify all stores to compaction rocksdb + partitionService.fireDbCompaction(groupId, tableName); + // TODO How to deal with exceptions? + } + + public Map getQuota() throws PDException { + List graphs = partitionService.getGraphs(); + String delimiter = String.valueOf(MetadataKeyHelper.DELIMITER); + HashMap storages = new HashMap<>(); + for (Metapb.Graph g : graphs) { + String graphName = g.getGraphName(); + String[] splits = graphName.split(delimiter); + if (splits.length < 2) { + continue; + } + String graphSpace = splits[0]; + storages.putIfAbsent(graphSpace, 0L); + List stores = getStores(graphName); + long dataSize = 0; + for (Metapb.Store store : stores) { + List gss = store.getStats() + .getGraphStatsList(); + for (Metapb.GraphStats gs : gss) { + boolean nameEqual = graphName.equals(gs.getGraphName()); + boolean roleEqual = Metapb.ShardRole.Leader.equals( + gs.getRole()); + if (nameEqual && roleEqual) { + dataSize += gs.getApproximateSize(); + } + } + } + Long size = storages.get(graphSpace); + size += dataSize; + storages.put(graphSpace, size); + + } + Metapb.GraphSpace.Builder spaceBuilder = Metapb.GraphSpace.newBuilder(); + HashMap limits = new HashMap<>(); + for (Map.Entry item : storages.entrySet()) { + String spaceName = item.getKey(); + String value = kvService.get(graphSpaceConfPrefix + spaceName); + if (!StringUtils.isEmpty(value)) { + HashMap config = new Gson().fromJson(value, HashMap.class); + Long size = item.getValue(); + int limit = ((Double) config.get("storage_limit")).intValue(); + long limitByLong = limit 
* 1024L * 1024L; + try { + spaceBuilder.setName(spaceName).setStorageLimit(limitByLong).setUsedSize(size); + Metapb.GraphSpace graphSpace = spaceBuilder.build(); + configService.setGraphSpace(graphSpace); + } catch (Exception e) { + log.error("update graph space with error:", e); + } + // KB and GB * 1024L * 1024L + if (size > limitByLong) { + limits.put(spaceName, true); + continue; + } + } + limits.put(spaceName, false); + + } + GraphState.Builder stateBuilder = GraphState.newBuilder() + .setMode(GraphMode.ReadOnly) + .setReason( + GraphModeReason.Quota); + for (Metapb.Graph g : graphs) { + String graphName = g.getGraphName(); + String[] splits = graphName.split(delimiter); + if (splits.length < 2) { + continue; + } + String graphSpace = splits[0]; + Metapb.GraphState gsOld = g.getGraphState(); + GraphMode gmOld = gsOld != null ? gsOld.getMode() : GraphMode.ReadWrite; + GraphMode gmNew = limits.get( + graphSpace) ? GraphMode.ReadOnly : GraphMode.ReadWrite; + if (gmOld == null || gmOld.getNumber() != gmNew.getNumber()) { + stateBuilder.setMode(gmNew); + if (gmNew.getNumber() == GraphMode.ReadOnly.getNumber()) { + stateBuilder.setReason(GraphModeReason.Quota); + } + GraphState gsNew = stateBuilder.build(); + Metapb.Graph newGraph = g.toBuilder().setGraphState(gsNew) + .build(); + partitionService.updateGraph(newGraph); + statusListeners.forEach(listener -> { + listener.onGraphChange(newGraph, gsOld, gsNew); + }); + } + } + + return limits; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public Runnable getQuotaChecker() { + return quotaChecker; + } + + public TaskInfoMeta getTaskInfoMeta() { + return taskInfoMeta; + } + + public StoreInfoMeta getStoreInfoMeta() { + return storeInfoMeta; + } +======== + + @Getter + private Runnable quotaChecker = () -> { + try { + getQuota(); + } catch (Exception e) { + log.error( + "obtaining and sending graph space quota information with error: ", + e); + } + }; +>>>>>>>> 
d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + + /** + * Get the leader of the partition + * + * @param partition + * @param initIdx + * @return + */ + public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) { + Metapb.Shard leader = null; + try { + var shardGroup = this.getShardGroup(partition.getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + for (Metapb.Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + leader = shard; + } + } + } catch (Exception e) { + log.error("get leader error: group id:{}, error: {}", + partition.getId(), e.getMessage()); +======== + if (shardGroup != null) { + for (Metapb.Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + leader = shard; + } + } + } + }catch (Exception e){ + log.error("get leader error: group id:{}, error:", partition.getId(), e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + return leader; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== + private void updateShardGroupCache(Metapb.ShardGroup group) { + if (group == null || group.getShardsList().isEmpty()) { + return; + } + partitionService.updateShardGroupCache(group); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + public CacheResponse getCache() throws PDException { + + List stores = getStores(); + List groups = getShardGroups(); + List graphs = partitionService.getGraphs(); + CacheResponse cache = CacheResponse.newBuilder().addAllGraphs(graphs) + .addAllShards(groups) + .addAllStores(stores) + .build(); + return cache; + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== + + public 
int getStoreGroupByStore(Metapb.Store store ) throws PDException { + return getStoreGroupByStore(store.getId()); + } + + /** + * get the store group id by the store id + * @param storeId store id + * @return group id + * @throws PDException + */ + public int getStoreGroupByStore(Long storeId) throws PDException { + return storeInfoMeta.getStoreGroupByStoreId(storeId); + } + + public boolean isStoreHasStoreGroup(long storeId) throws PDException { + return storeInfoMeta.isStoreHasGroup(storeId); + } + + public List getStoresByStoreGroup(int storeGroupId) throws PDException { + Set storeIds = storeInfoMeta.getStoreIdsByGroup(storeGroupId); + return getStores().stream().filter(store -> storeIds.contains(store.getId())).collect(Collectors.toList()); + } + + /** + * need check the store group id is exist && the store has no partition + * + * @param storeId store id + * @param storeGroupId group id + * @throws PDException + */ + public void updateStoreGroupRelation(long storeId, int storeGroupId) throws PDException { + var storeGroup = configService.getStoreGroup(storeGroupId); + if (storeGroup != null) { + storeInfoMeta.updateStoreGroup(storeId, storeGroupId); + } else { + throw new PDException(-1, "store group not found"); + } + } + + public int getShardGroupBelongsToStoreGroup(Metapb.ShardGroup group) throws PDException { + if (group == null || group.getShardsList().isEmpty()) { + return 0; + } + return getStoreGroupByStore(group.getShardsList().get(0).getStoreId()); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java new file mode 100644 index 0000000000..0df75a7047 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -0,0 +1,1193 @@ +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +package org.apache.hugegraph.pd; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.PriorityQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +======== +import 
org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import org.apache.hugegraph.pd.raft.RaftEngine; + +import lombok.extern.slf4j.Slf4j; +import org.apache.hugegraph.pd.common.Consts; + +/** + * The task scheduling service checks the status of stores, resources, and partitions on a + * regular basis, migrates data in a timely manner, and errors are on nodes + * 1. Monitor whether the store is offline + * 2. Check whether the replica of the partition is correct + * 3. Check whether the working mode of the partition is correct + * 4. Monitor whether the partition needs to be split and whether the split is completed + */ +@Slf4j +public class TaskScheduleService { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + + private static final String BALANCE_SHARD_KEY = "BALANCE_SHARD_KEY"; + // The dynamic balancing can only be carried out after the machine is offline for 30 minutes + private final long TurnOffAndBalanceInterval = 30 * 60 * 1000; + // leader balances the time interval + private final long BalanceLeaderInterval = 30 * 1000; + private final PDConfig pdConfig; + private final long clusterStartTime; // + private final StoreNodeService storeService; + private final PartitionService partitionService; + private final ScheduledExecutorService executor; + private final TaskInfoMeta taskInfoMeta; + private final StoreMonitorDataService storeMonitorDataService; + private final KvService kvService; + private final LogService logService; + private final Comparator> kvPairComparatorAsc = (o1, o2) -> { + if (o1.getValue() == o2.getValue()) { + return o1.getKey().compareTo(o2.getKey()); + } + return 
o1.getValue().compareTo(o2.getValue()); + }; + private final Comparator> kvPairComparatorDesc = (o1, o2) -> { + if (o1.getValue() == o2.getValue()) { + return o2.getKey().compareTo(o1.getKey()); + } + return o2.getValue().compareTo(o1.getValue()); + }; + private long lastStoreTurnoffTime = 0; + private long lastBalanceLeaderTime = 0; +======== + private static final String KEY_ENABLE_AUTO_BALANCE = "key/ENABLE_AUTO_BALANCE"; + private final long TurnOffAndBalanceInterval = 30 * 60 * 1000; //机器下线30后才能进行动态平衡 + + private final long BalanceLeaderInterval = 30 * 1000; // leader平衡时间间隔 + private final PDConfig pdConfig; + private StoreNodeService storeService; + private PartitionService partitionService; + private ScheduledExecutorService executor; + private TaskInfoMeta taskInfoMeta; + private StoreMonitorDataService storeMonitorDataService; + private KvService kvService; + private LogService logService; + private ConfigService configService; + private long lastStoreTurnoffTime = 0; + private long lastBalanceLeaderTime = 0; + private final long clusterStartTime; + + /** + * 按照value的排序,相同的按照key排序 + * @param + * @param + */ + private static class KvPairComparator, V extends Comparable> + implements Comparator> { + private boolean ascend; + + public KvPairComparator(boolean ascend) { + this.ascend = ascend; + } + + @Override + public int compare(KVPair o1, KVPair o2) { + if (Objects.equals(o1.getValue(), o2.getValue())) { + return o1.getKey().compareTo(o2.getKey()) * (ascend ? 1 : -1); + } + return (o1.getValue().compareTo(o2.getValue())) * (ascend ? 
1 : -1); + } + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + + public TaskScheduleService(PDConfig config, StoreNodeService storeService, + PartitionService partitionService, ConfigService configService) { + this.pdConfig = config; + this.storeService = storeService; + this.partitionService = partitionService; + this.taskInfoMeta = new TaskInfoMeta(config); + this.logService = new LogService(pdConfig); + this.storeMonitorDataService = new StoreMonitorDataService(pdConfig); + this.clusterStartTime = System.currentTimeMillis(); + this.kvService = new KvService(pdConfig); + this.executor = new ScheduledThreadPoolExecutor(16); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +======== + this.configService = configService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + + /** + * * 初始化方法,用于启动定时任务 + */ + public void init() { + executor.scheduleWithFixedDelay(() -> { + try { + // if (! kvService.get(KEY_ENABLE_AUTO_BALANCE).isEmpty()) { + patrolStores(); + // } + } catch (Throwable e) { + log.error("patrolStores exception: ", e); + } + + }, 60, 60, TimeUnit.SECONDS); +// executor.scheduleWithFixedDelay(() -> { +// try { +// if (! 
kvService.get(KEY_ENABLE_AUTO_BALANCE).isEmpty()) { +// patrolPartitions(); +// balancePartitionLeader(false); +// balancePartitionShard(); +// } +// } catch (Throwable e) { +// log.error("patrolPartitions exception: ", e); +// } +// }, pdConfig.getPatrolInterval(), pdConfig.getPatrolInterval(), TimeUnit.SECONDS); + executor.scheduleWithFixedDelay(() -> { + if (isLeader()) { + kvService.clearTTLData(); + } + }, 1000, 1000, TimeUnit.MILLISECONDS); + + executor.scheduleWithFixedDelay( + () -> { + try { + if (isLeader()) { + storeService.getQuota(); + } + } catch (Exception e) { + log.warn("get quota with error:", e); + } + }, 2, 30, TimeUnit.SECONDS); + + // clean expired monitor data each 10 minutes, delay 3min. + if (isLeader() && this.pdConfig.getStore().isMonitorDataEnabled()) { + executor.scheduleAtFixedRate(() -> { + Long expTill = System.currentTimeMillis() / 1000 - + this.pdConfig.getStore().getRetentionPeriod(); + log.debug("monitor data keys before " + expTill + " will be deleted"); + int records = 0; + try { + for (Metapb.Store store : storeService.getStores()) { + int cnt = + this.storeMonitorDataService.removeExpiredMonitorData(store.getId(), + expTill); + log.debug("store id :{}, records:{}", store.getId(), cnt); + records += cnt; + } + } catch (PDException e) { + throw new RuntimeException(e); + } + log.debug(String.format("%d records has been deleted", records)); + }, 180, 600, TimeUnit.SECONDS); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + storeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { + if (status == Metapb.StoreState.Tombstone) { + lastStoreTurnoffTime = System.currentTimeMillis(); + } + + if (status == Metapb.StoreState.Up) { + executor.schedule(() -> { + try { + balancePartitionLeader(false); + } catch (PDException e) { + log.error("exception {}", e); + 
} + }, BalanceLeaderInterval, TimeUnit.MILLISECONDS); + + } + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); +======== +// storeService.addStatusListener(new StoreStatusListener() { +// @Override +// public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { +// if ( status == Metapb.StoreState.Tombstone ) { +// lastStoreTurnoffTime = System.currentTimeMillis(); +// } + +// if ( status == Metapb.StoreState.Up) { +// executor.schedule(()->{ +// try { //store 上线后延时1分钟进行leader平衡 +// balancePartitionLeader(false); +// } catch (PDException e) { +// log.error("exception {}", e); +// } +// }, BalanceLeaderInterval, TimeUnit.MILLISECONDS); +// +// } +// } +// +// @Override +// public void onGraphChange(Metapb.Graph graph, +// Metapb.GraphState stateOld, +// Metapb.GraphState stateNew) { +// +// } +// +// @Override +// public void onStoreRaftChanged(Metapb.Store store) { +// +// } +// }); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + + public void shutDown() { + executor.shutdownNow(); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + + private boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + +======== + private boolean isLeader(){ + return RaftEngine.getInstance().isLeader(); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + /** + * Inspect all stores to see if they are online and have enough storage space + */ + public List patrolStores() throws PDException { + if (!isLeader()) { + return null; + } + + List changedStores = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + 
// Check your store online status + List stores = storeService.getStores(""); + Map activeStores = storeService.getActiveStores("") + .stream().collect( + Collectors.toMap(Metapb.Store::getId, t -> t)); +======== + // 检查store在线状态 + List stores = storeService.getStores(); + Map activeStores = storeService.getActiveStores() + .stream().collect(Collectors.toMap(Metapb.Store::getId, t -> t)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + for (Metapb.Store store : stores) { + Metapb.Store changeStore = null; + if ((store.getState() == Metapb.StoreState.Up + || store.getState() == Metapb.StoreState.Unknown) + && !activeStores.containsKey(store.getId())) { + // If you are not online, the modification status is offline + changeStore = Metapb.Store.newBuilder(store) + .setState(Metapb.StoreState.Offline) + .build(); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } else if ((store.getState() == Metapb.StoreState.Exiting && + !activeStores.containsKey(store.getId())) || + (store.getState() == Metapb.StoreState.Offline && + (System.currentTimeMillis() - store.getLastHeartbeat() > + pdConfig.getStore().getMaxDownTime() * 1000) && + (System.currentTimeMillis() - clusterStartTime > + pdConfig.getStore().getMaxDownTime() * 1000))) { + // Manually change the parameter to Offline or Offline Duration + // Modify the status to shut down and increase checkStoreCanOffline detect + if (storeService.checkStoreCanOffline(store)) { + changeStore = Metapb.Store.newBuilder(store) + .setState(Metapb.StoreState.Tombstone).build(); + this.logService.insertLog(LogService.NODE_CHANGE, + LogService.TASK, changeStore); + log.info("patrolStores store {} Offline", changeStore.getId()); + } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + // tomb store 会导致从新分区,暂时不处理 +// else if ((store.getState() 
== Metapb.StoreState.Exiting && !activeStores.containsKey(store.getId())) || +// (store.getState() == Metapb.StoreState.Offline && +// (System.currentTimeMillis() - store.getLastHeartbeat() > +// pdConfig.getStore().getMaxDownTime() * 1000) && +// (System.currentTimeMillis() - clusterStartTime > +// pdConfig.getStore().getMaxDownTime() * 1000))) { +// //手工修改为下线或者离线达到时长 +// // 修改状态为关机, 增加 checkStoreCanOffline 检测 +// if (storeService.checkStoreCanOffline(store)) { +// changeStore = Metapb.Store.newBuilder(store) +// .setState(Metapb.StoreState.Tombstone).build(); +// this.logService.insertLog(LogService.NODE_CHANGE, +// LogService.TASK, changeStore); +// log.info("patrolStores store {} Offline", changeStore.getId()); +// } +// } + if (changeStore != null) { + storeService.updateStore(changeStore); + changedStores.add(changeStore); + } + } + return changedStores; + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + * Inspect all partitions to check whether the number of replicas is correct and the number + * of replicas in the shard group +======== + * 巡查所有的分区,检查副本数是否正确, shard group的副本数 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + */ + public List patrolPartitions() throws PDException { + if (!isLeader()) { + return null; + } + + // If the number of replicas is inconsistent, reallocate replicas + for (Metapb.ShardGroup group : storeService.getShardGroups()) { + if (group.getShardsCount() != pdConfig.getPartition().getShardCount()) { + storeService.reallocShards(group); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + } + // Check if the shard is online. 
+ Map tombStores = storeService.getTombStores().stream().collect( + Collectors.toMap(Metapb.Store::getId, t -> t)); + + var partIds = new HashSet(); + + for (var pair : tombStores.entrySet()) { + for (var partition : partitionService.getPartitionByStore(pair.getValue())) { + if (partIds.contains(partition.getId())) { + continue; + } + partIds.add(partition.getId()); + + storeService.storeTurnoff(pair.getValue()); + partitionService.shardOffline(partition, pair.getValue().getId()); + } + } + + return null; + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + * Balance the number of partitions between stores + * It takes half an hour for the machine to turn to UP before it can be dynamically balanced + */ + public synchronized Map> balancePartitionShard() throws + PDException { + log.info("balancePartitions starting, isleader:{}", isLeader()); +======== + * 在Store之间平衡分区的数量 + * 机器转为UP半小时后才能进行动态平衡 + * + */ + @Deprecated + public synchronized Map> balancePartitionShard() throws PDException { + return balancePartitionShard(Consts.DEFAULT_STORE_GROUP_ID); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + + public synchronized Map> balancePartitionShard(int storeGroupId) throws PDException { + log.info("balancePartitionShard starting, is leader:{}", isLeader()); + if (!isLeader() || System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { + return null; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + if (System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { + return null; + } + + int activeStores = storeService.getActiveStores().size(); + if (activeStores == 0) { +======== + var activeStores = storeService.getActiveStoresByStoreGroup(storeGroupId); + if (activeStores.isEmpty()) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + log.warn("balancePartitionShard non active stores, skip to balancePartitionShard"); + return null; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { + return null; + } + + int totalShards = pdConfig.getConfigService().getPartitionCount() * + pdConfig.getPartition().getShardCount(); + int averageCount = totalShards / activeStores; + int remainder = totalShards % activeStores; + + // Count the partitions on each store, StoreId -> PartitionID, ShardRole + Map> partitionMap = new HashMap<>(); + storeService.getActiveStores().forEach(store -> { + partitionMap.put(store.getId(), new HashMap<>()); + }); + +======== + int totalShards = configService.getPartitionCount(storeGroupId) * pdConfig.getPartition().getShardCount(); + int averageCount = totalShards / activeStores.size(); + int remainder = totalShards % activeStores.size(); + + // 统计每个store上分区, StoreId -> PartitionID, ShardRole + Map> partitionMap = activeStores.stream() + .collect(Collectors.toMap(Metapb.Store::getId, s-> new HashMap<>())); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + AtomicReference isLeaner = new AtomicReference<>(false); + for (var shardGroup : storeService.getShardGroups(storeGroupId)) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Learner) { + isLeaner.set(true); + break; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + try { + storeService.getShardList(partition.getId()).forEach(shard -> { + Long storeId = shard.getStoreId(); + if (shard.getRole() == Metapb.ShardRole.Learner + || partition.getState() != Metapb.PartitionState.PState_Normal) { + isLeaner.set(true); + } + if (partitionMap.containsKey(storeId)) { + 
partitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } + }); + } catch (PDException e) { + log.error("get partition {} shard list error:{}.", partition.getId(), + e.getMessage()); +======== + long storeId = shard.getStoreId(); + partitionMap.get(storeId).put(shardGroup.getId(), shard.getRole()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + } + + if (isLeaner.get()) { + log.warn("balancePartitionShard is doing, skip this balancePartitionShard task"); + return null; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + // According to shard sort the quantity from highest to lowest + List> sortedList = new ArrayList<>(); + partitionMap.forEach((storeId, shards) -> { + sortedList.add(new KVPair(storeId, shards.size())); + }); + sortedList.sort(((o1, o2) -> o2.getValue().compareTo(o1.getValue()))); + // The largest heap, moved in store -> shard count +======== + // 按照shard数量由高到低排序store + List> sortedList = partitionMap.entrySet().stream() + .map(entry -> new KVPair<>(entry.getKey(), entry.getValue().size())) + .sorted((o1, o2) -> o2.getValue().compareTo(o1.getValue())) + .collect(Collectors.toList()); + + // 最大堆, 被移入的store -> shard count +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + PriorityQueue> maxHeap = new PriorityQueue<>(sortedList.size(), + (o1, o2) -> o2.getValue() + .compareTo( + o1.getValue())); + + // of individual copies committedIndex + Map> committedIndexMap = partitionService.getCommittedIndexStats(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + // Partition ID -->source StoreID, target StoreID + Map> movedPartitions = new HashMap<>(); + // Remove redundant shards, traverse the stores in the order of shards from most to + // least, and the remainder is allocated to the store with more 
shards first, reducing + // the probability of migration +======== + + // 分区ID --> 源StoreID,目标StoreID + Map> movedPartitions = new HashMap<>(); + + // 移除多余的shard, 按照shards由多到少的顺序遍历store,余数remainder优先给shards多的store分配,减少迁移的概率 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + for (int index = 0; index < sortedList.size(); index++) { + long storeId = sortedList.get(index).getKey(); + if (!partitionMap.containsKey(storeId)) { + log.error("cannot found storeId {} in partitionMap", storeId); + return null; + } + Map shards = partitionMap.get(storeId); + int targetCount = index < remainder ? averageCount + 1 : averageCount; + // Remove the redundant shards and add the source StoreID. is not a leader, and the + // partition is unique + if (shards.size() > targetCount) { + int movedCount = shards.size() - targetCount; + log.info( + "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + + "moveCount {}", + storeId, shards.size(), targetCount, movedCount); + for (Iterator iterator = shards.keySet().iterator(); movedCount > 0 && iterator.hasNext(); ) { + Integer id = iterator.next(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + + if (!movedPartitions.containsKey(id)) { +======== + if ( !movedPartitions.containsKey(id)) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + log.info("store {}, shard of partition {} can be moved", storeId, id); + movedPartitions.put(id, new KVPair<>(storeId, 0L)); + movedCount--; + } + } + } else if (shards.size() < targetCount) { + int addCount = targetCount - shards.size(); + log.info( + "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + + "addCount {}", + storeId, shards.size(), targetCount, addCount); + maxHeap.add(new KVPair<>(storeId, addCount)); + } + } + +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + if (movedPartitions.size() == 0) { + log.warn( + "movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} " + + "sortedList:{}", + totalShards, averageCount, remainder, sortedList); + } + Iterator>> moveIterator = + movedPartitions.entrySet().iterator(); + + while (moveIterator.hasNext()) { + if (maxHeap.size() == 0) { +======== + if (movedPartitions.isEmpty()){ + log.warn("movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} sortedList:{}", + totalShards, averageCount, remainder, sortedList); + } + + Iterator>> moveIterator = movedPartitions.entrySet().iterator(); + + while (moveIterator.hasNext()) { + if(maxHeap.isEmpty()) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + break; + } + Map.Entry> moveEntry = moveIterator.next(); + int partitionId = moveEntry.getKey(); + long sourceStoreId = moveEntry.getValue().getKey(); + + List> tmpList = new ArrayList<>(maxHeap.size()); + while (!maxHeap.isEmpty()) { + KVPair pair = maxHeap.poll(); + long destStoreId = pair.getKey(); + boolean destContains = false; + if (partitionMap.containsKey(destStoreId)) { + destContains = partitionMap.get(destStoreId).containsKey(partitionId); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + // If the destination store already contains the partition, take the store + if (!destContains) { +======== + // 如果目的store已经包含了该partition,则取一下store + if(!destContains) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + moveEntry.getValue().setValue(pair.getKey()); + log.info( + "balancePartitionShard will move partition {} from store {} to store " + + "{}", + moveEntry.getKey(), + moveEntry.getValue().getKey(), + moveEntry.getValue().getValue()); + if (pair.getValue() > 1) { + 
pair.setValue(pair.getValue() - 1); + tmpList.add(pair); + } + break; + } + tmpList.add(pair); + } + maxHeap.addAll(tmpList); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); + + // Start the migration +======== + // 开始迁移 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + movedPartitions.forEach((partId, storePair) -> { + // Neither the source nor destination storeID is 0 + if (storePair.getKey() > 0 && storePair.getValue() > 0) { + partitionService.movePartitionsShard(partId, storePair.getKey(), + storePair.getValue()); + } else { + log.warn("balancePartitionShard key or value is zero, partId:{} storePair:{}", + partId, storePair); + } + }); + return movedPartitions; + } + + /** + * Balance the number of leaders of partitions between stores + */ + public synchronized Map balancePartitionLeader(boolean immediately) throws + PDException { + Map results = new HashMap<>(); + + if (!isLeader()) { + return results; + } + + if (!immediately && + System.currentTimeMillis() - lastBalanceLeaderTime < BalanceLeaderInterval) { + return results; + } + + lastBalanceLeaderTime = System.currentTimeMillis(); + + // When a task is split or scaled-in, it is exited + var taskMeta = storeService.getTaskInfoMeta(); + if (taskMeta.hasSplitTaskDoing() || taskMeta.hasMoveTaskDoing()) { + throw new PDException(1001, "split or combine task is processing, please try later!"); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { + throw new PDException(1001, "balance shard is processing, please try later!"); +======== + for (var storeGroup : configService.getAllStoreGroup()) { + results.putAll(balanceShardLeaderByStoreGroup(storeGroup.getGroupId())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + + return results; + } + + private Map balanceShardLeaderByStoreGroup(int storeGroupId) throws PDException { + Map results = new HashMap<>(); + + List shardGroups = storeService.getShardGroups(storeGroupId); + + // store id -> shard group count + Map storeShardCount = shardGroups.stream() + .flatMap(shardGroup -> shardGroup.getShardsList().stream()) + .map(Metapb.Shard::getStoreId) + .collect(Collectors.groupingBy(o -> o, Collectors.counting())); + + log.info("balancePartitionLeader, storeGroup: {}, shard group size: {}, by store: {}", storeGroupId, + shardGroups.size(), storeShardCount); + // total + int shardCountPerPartition = pdConfig.getPartition().getShardCount(); + + // part 1 : shard count % shard count per partition + var targetCountMap = storeShardCount.entrySet().stream() + .map(e -> new KVPair<>(e.getKey(), e.getValue() / shardCountPerPartition)) + .collect(Collectors.toMap(KVPair::getKey, KVPair::getValue)); + + var allocCount = targetCountMap.values().stream().mapToInt(Long::intValue).sum(); + int shardGroupCount = shardGroups.size(); + + if (allocCount != shardGroupCount) { + // part 2 : reminder count + var reminderList = storeShardCount.entrySet().stream() + .map(e -> new KVPair<>(e.getKey(), e.getValue() % shardCountPerPartition)) + .filter(e -> e.getValue() > 0) + .sorted(new KvPairComparator<>(false)) + .collect(Collectors.toList()); + for (int i = 0; i < shardGroupCount - allocCount; i++) { + var pair = reminderList.get(i); + targetCountMap.put(pair.getKey(), targetCountMap.getOrDefault(pair.getKey(), 0L) + 1); + } + } + + PriorityQueue> targetCount = new PriorityQueue<>(new KvPairComparator<>(true)); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + shardGroups.forEach(group -> { + group.getShardsList().forEach(shard -> { + storeShardCount.put(shard.getStoreId(), + 
storeShardCount.getOrDefault(shard.getStoreId(), 0) + 1); + }); + }); + + log.info("balancePartitionLeader, shard group size: {}, by store: {}", shardGroups.size(), + storeShardCount); + + PriorityQueue> targetCount = + new PriorityQueue<>(kvPairComparatorDesc); + + var sortedGroups = storeShardCount.entrySet().stream() + .map(entry -> new KVPair<>(entry.getKey(), + entry.getValue())) + .sorted(kvPairComparatorAsc) + .collect(Collectors.toList()); + int sum = 0; + + for (int i = 0; i < sortedGroups.size() - 1; i++) { + // at least one + int v = Math.max( + sortedGroups.get(i).getValue() / pdConfig.getPartition().getShardCount(), 1); + targetCount.add(new KVPair<>(sortedGroups.get(i).getKey(), v)); + sum += v; + } + targetCount.add(new KVPair<>(sortedGroups.get(sortedGroups.size() - 1).getKey(), + shardGroups.size() - sum)); +======== + targetCount.addAll(targetCountMap.entrySet().stream() + .map(e -> new KVPair<>(e.getKey(), e.getValue())) + .collect(Collectors.toList())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + log.info("target count: {}", targetCount); + + for (var group : shardGroups) { + var map = group.getShardsList().stream() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); + var tmpList = new ArrayList>(); + // If there are many stores, they may not contain the corresponding store ID. Save + // the non-compliant stores to the temporary list until you find a suitable store + while (!targetCount.isEmpty()) { +======== + .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); + var tmpList = new ArrayList>(); + // store比较多的情况,可能不包含对应的store id. 
则先将不符合的store保存到临时列表,直到找到一个合适的store + while (!targetCount.isEmpty()){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + var pair = targetCount.poll(); + var storeId = pair.getKey(); + if (map.containsKey(storeId)) { + if (map.get(storeId).getRole() != Metapb.ShardRole.Leader) { + log.info("shard group{}, store id:{}, set to leader", group.getId(), + storeId); + partitionService.transferLeader(group.getId(), map.get(storeId)); + results.put(group.getId(), storeId); + } else { + log.info("shard group {}, store id :{}, is leader, no need change", + group.getId(), storeId); + } + + if (pair.getValue() > 1) { + // count -1 + pair.setValue(pair.getValue() - 1); + tmpList.add(pair); + } + // If it is found, the processing is complete + break; + } else { + tmpList.add(pair); + } + } + // 设置完成后,如果没达到target count,还要放回去 + targetCount.addAll(tmpList); + } + + return results; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + private long getMaxIndexGap(Map> committedIndexMap, int partitionId) { + long maxGap = Long.MAX_VALUE; + if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { + return maxGap; + } + Map shardMap = committedIndexMap.get(partitionId); + if (shardMap == null || shardMap.size() == 0) { + return maxGap; + } + List sortedList = new ArrayList<>(); + shardMap.forEach((storeId, committedIndex) -> { + sortedList.add(committedIndex); + }); + sortedList.sort(Comparator.reverseOrder()); + maxGap = sortedList.get(0) - sortedList.get(sortedList.size() - 1); + return maxGap; + } +======== +// private long getMaxIndexGap(Map> committedIndexMap, int partitionId) { +// long maxGap = Long.MAX_VALUE; +// if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { +// return maxGap; +// } +// Map shardMap = committedIndexMap.get(partitionId); +// if(shardMap == null || shardMap.size() == 0) { +// return maxGap; 
+// } +// List sortedList = new ArrayList<>(); +// shardMap.forEach((storeId, committedIndex) -> { +// sortedList.add(committedIndex); +// }); +// // 由大到小排序的list +// sortedList.sort(Comparator.reverseOrder()); +// maxGap = sortedList.get(0) - sortedList.get(sortedList.size() - 1); +// return maxGap; +// } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + + /** + * Perform partition splitting, which is divided into automatic splitting and manual splitting + * + * @return + * @throws PDException + */ + public List splitPartition( ClusterOp.OperationMode mode, int storeGroupId, + List params) throws PDException { + + if (mode == ClusterOp.OperationMode.Auto) { + return autoSplitPartition(storeGroupId); + } + + var list = params.stream() + .map(param -> new KVPair<>(param.getPartitionId(), param.getCount())) + .collect(Collectors.toList()); + + storeService.splitShardGroups(list); + return null; + } + + /** + * Partition splitting is performed automatically, and each store reaches the maximum number + * of partitions + * execution conditions + * The number of partitions per machine after the split is less than partition + * .max-partitions-per-store + * + * @throws PDException + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + public List autoSplitPartition() throws PDException { +======== + public List autoSplitPartition(int storeGroupId) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + if (!isLeader()) { + return null; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats().getState()) { + if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) { + throw new 
PDException(Pdpb.ErrorType.Split_Partition_Doing_VALUE, + "The data is splitting"); + } else { + throw new PDException(Pdpb.ErrorType.Cluster_State_Forbid_Splitting_VALUE, + "The current state of the cluster prohibits splitting data"); + } + } + + // The maximum split count that a compute cluster can support + int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeService.getActiveStores().size() / + (storeService.getShardGroups().size() * + pdConfig.getPartition().getShardCount()); + + if (splitCount < 2) { + throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "Too many partitions per store, partition.store-max-shard-count" + + " = " + + pdConfig.getPartition().getMaxShardsPerStore()); +======== + if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats(storeGroupId).getState()) { + if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats(storeGroupId).getState()) { + throw new PDException(ErrorType.Split_Partition_Doing_VALUE, "The data is splitting"); + } + + else { + throw new PDException(ErrorType.Cluster_State_Forbid_Splitting_VALUE, + "The current state of the cluster prohibits splitting data"); + } + } + + // 计算集群能能支持的最大split count + int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeService.getActiveStoresByStoreGroup(storeGroupId).size() / + (configService.getPartitionCount(storeGroupId) * pdConfig.getPartition().getShardCount()); + + if (splitCount < 2) { + throw new PDException(ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "Too many partitions per store, partition.store-max-shard-count = " + + pdConfig.getPartition().getMaxShardsPerStore()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + } + + // If the maximum number of partitions per store is not reached, it will be split + log.info("Start to split partitions..., split count = {}", splitCount); + +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + // Set the cluster status to Offline + storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); + // Modify the default number of partitions + // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * + // splitCount); + + var list = storeService.getShardGroups().stream() + .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) + .collect(Collectors.toList()); +======== + // 设置集群状态为下线 + storeService.updateClusterStatus(storeGroupId, Metapb.ClusterState.Cluster_Offline); + // 修改默认分区数量 + // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * splitCount); + + var list = storeService.getShardGroups(storeGroupId).stream() + .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) + .collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java + storeService.splitShardGroups(list); + + return null; + } + + /** + * Store reports the status of the task + * The state of the partition changes, and the state of the ShardGroup, graph, and the entire + * cluster where the partition resides + * + * @param task + */ + public void reportTask(MetaTask.Task task) { + try { + switch (task.getType()) { + case Split_Partition: + partitionService.handleSplitTask(task); + break; + case Move_Partition: + partitionService.handleMoveTask(task); + break; + case Clean_Partition: + partitionService.handleCleanPartitionTask(task); + break; + case Build_Index: + partitionService.handleBuildIndexTask(task); + break; + case Backup_Graph: + partitionService.handleBackupGraphTask(task); + break; + default: + break; + } + } catch (Exception e) { + log.error("Report task exception {}, {}", e, task); + } + } + + /** + * Compaction on rocksdb + * + * @throws PDException + */ + public Boolean dbCompaction(String tableName) throws 
PDException { + if (!isLeader()) { + return false; + } + + for (Metapb.ShardGroup shardGroup : storeService.getShardGroups()) { + storeService.shardGroupsDbCompaction(shardGroup.getId(), tableName); + } + + return true; + } + + /** + * Determine whether all partitions of a store can be migrated out, and give the judgment + * result and migration plan + */ + public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) throws + PDException { + if (!isLeader()) { + return null; + } + // Analyze whether the partition on a store can be completely checked out + Map resultMap = new HashMap<>(); + // The definition object is used to hold the partition above the source store StoreId + // ->PartitionID, ShardRole + Map> sourcePartitionMap = new HashMap<>(); + sourcePartitionMap.put(sourceStore.getId(), new HashMap<>()); + // The definition object is used to hold the partition above the other active stores + // StoreId ->PartitionID, ShardRole + Map> otherPartitionMap = new HashMap<>(); + // The amount of disk space remaining for each store + Map availableDiskSpace = new HashMap<>(); + // Record the amount of data in the partition to be migrated + Map partitionDataSize = new HashMap<>(); + + storeService.getActiveStores().forEach(store -> { + if (store.getId() != sourceStore.getId()) { + otherPartitionMap.put(store.getId(), new HashMap<>()); + // Records the remaining disk space of other stores, in bytes + availableDiskSpace.put(store.getId(), store.getStats().getAvailable()); + } else { + resultMap.put("current_store_is_online", true); + } + }); + // Count the size of the partition to be migrated (from storeStats in KB) + for (Metapb.GraphStats graphStats : sourceStore.getStats().getGraphStatsList()) { + partitionDataSize.put(graphStats.getPartitionId(), + partitionDataSize.getOrDefault(graphStats.getPartitionId(), 0L) + + graphStats.getApproximateSize()); + } + // Assign values to sourcePartitionMap and otherPartitionMap + partitionService.getPartitions().forEach(partition 
-> { + try { + storeService.getShardList(partition.getId()).forEach(shard -> { + long storeId = shard.getStoreId(); + if (storeId == sourceStore.getId()) { + sourcePartitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } else { + if (otherPartitionMap.containsKey(storeId)) { + otherPartitionMap.get(storeId).put(partition.getId(), shard.getRole()); + } + } + + }); + } catch (PDException e) { + throw new RuntimeException(e); + } + }); + // Count the partitions to be removed: all partitions on the source store + Map> movedPartitions = new HashMap<>(); + for (Map.Entry entry : sourcePartitionMap.get( + sourceStore.getId()).entrySet()) { + movedPartitions.put(entry.getKey(), new KVPair<>(sourceStore.getId(), 0L)); + } + // Count the number of partitions of other stores and save them with a small top heap, so + // that stores with fewer partitions are always prioritized + PriorityQueue> minHeap = new PriorityQueue<>(otherPartitionMap.size(), + (o1, o2) -> o1.getValue() + .compareTo( + o2.getValue())); + otherPartitionMap.forEach((storeId, shards) -> { + minHeap.add(new KVPair(storeId, shards.size())); + }); + // Traverse the partitions to be migrated, and prioritize the migration to the store with + // fewer partitions + Iterator>> moveIterator = + movedPartitions.entrySet().iterator(); + while (moveIterator.hasNext()) { + Map.Entry> moveEntry = moveIterator.next(); + int partitionId = moveEntry.getKey(); + // Record the elements that have popped up in the priority + List> tmpList = new ArrayList<>(); + while (minHeap.size() > 0) { + KVPair pair = minHeap.poll(); // The first element pops up + long storeId = pair.getKey(); + int partitionCount = pair.getValue(); + Map shards = otherPartitionMap.get(storeId); + final int unitRate = 1024; // Balance the feed rate of different storage units + if ((!shards.containsKey(partitionId)) && ( + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate >= + partitionDataSize.getOrDefault(partitionId, 0L))) { + // If the 
partition is not included on the destination store and the + // remaining space of the destination store can accommodate the partition, + // the migration is performed + moveEntry.getValue().setValue(storeId); // Set the target store for the move + log.info("plan to move partition {} to store {}, " + + "available disk space {}, current partitionSize:{}", + partitionId, + storeId, + availableDiskSpace.getOrDefault(storeId, 0L) / unitRate, + partitionDataSize.getOrDefault(partitionId, 0L) + ); + // Update the expected remaining space for the store + availableDiskSpace.put(storeId, availableDiskSpace.getOrDefault(storeId, 0L) + - partitionDataSize.getOrDefault(partitionId, + 0L) * + unitRate); + // Update the number of partitions for that store in the stat variable + partitionCount += 1; + pair.setValue(partitionCount); + tmpList.add(pair); + break; + } else { + tmpList.add(pair); + } + } + minHeap.addAll(tmpList); + } + // Check that there are no partitions that don't have a target store assigned + List remainPartitions = new ArrayList<>(); + movedPartitions.forEach((partId, storePair) -> { + if (storePair.getValue() == 0L) { + remainPartitions.add(partId); + } + }); + + boolean isExecutingTasks = storeService.getStore(sourceStore.getId()).getStats().getExecutingTask(); + + if (!remainPartitions.isEmpty() || isExecutingTasks) { + resultMap.put("flag", false); + resultMap.put("movedPartitions", null); + } else { + resultMap.put("flag", true); + resultMap.put("movedPartitions", movedPartitions); + } + return resultMap; + + } + + public Map> movePartitions( + Map> movedPartitions) { + if (!isLeader()) { + return null; + } + // Start the migration + log.info("begin move partitions:"); + movedPartitions.forEach((partId, storePair) -> { + // Neither the source nor destination storeID is 0 + if (storePair.getKey() > 0 && storePair.getValue() > 0) { + partitionService.movePartitionsShard(partId, storePair.getKey(), + storePair.getValue()); + } + }); + return movedPartitions; 
+ } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java new file mode 100644 index 0000000000..b04c2feb20 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -0,0 +1,460 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.config; +======== +package org.apache.hugegraph.pd.config; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.commons.collections4.CollectionUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; + +import lombok.Data; +import lombok.Getter; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; + +import lombok.Data; + +/** + * PD profile + */ +@Data +@Component +public class PDConfig { + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + // cluster ID +======== + private static String[] storeInfo = {"store", + "$2a$04$9ZGBULe2vc73DMj7r/iBKeQB1SagtUXPrDbMmNswRkTwlWQURE/Jy", + "E3UnnQa605go"}; + private static String[] serverInfo = {"hg", + "$2a$04$i10KooNg6wLvIPVDh909n.RBYlZ/4pJo978nFK86nrqQiGIKV4UGS", + "qRyYhxVAWDb5"}; + private static String[] hubbleInfo = {"hubble", + "$2a$04$pSGkohaywGgFrJLr6VOPm.IK2WtOjlNLcZN8gct5uIKEDO1I61DGa", + "iMjHnUl5Pprx"}; + private static String[] vermeer = 
{"vermeer", + "$2a$04$N89qHe0v5jqNJKhQZHnTdOFSGmiNoiA2B2fdWpV2BwrtJK72dXYD.", + "FqU8BOvTpteT"}; + private static String[][] infos = new String[][]{storeInfo, serverInfo, hubbleInfo, vermeer}; + + @Getter + private static List defaultServers; + + static { + defaultServers = new ArrayList<>(infos.length); + for (String[] info : infos) { + defaultServers.add(new Server(info[0], info[1], info[2])); + } + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + @Value("${pd.cluster_id:1}") + private long clusterId; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + // The patrol task interval +======== + // 巡查任务时间间隔 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + @Value("${pd.patrol-interval:300}") + private long patrolInterval = 300; + @Value("${pd.data-path}") + private String dataPath; + @Value("${pd.initial-store-count:3}") + private int minStoreCount; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + // The initial store list, within which the store is automatically activated +======== + // 初始store列表,该列表内的store自动激活 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + // format: store_addresss, store_address, store_address/group_id, store_address/group_id + @Value("${pd.initial-store-list: ''}") + private String initialStoreList; + @Value("${grpc.host}") + private String host; + + @Value("${license.verify-path}") + private String verifyPath; + @Value("${license.license-path}") + private String licensePath; + @Autowired +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + private ThreadPoolGrpc threadPoolGrpc; +======== + private JobConfig jobConfig; + + @Autowired + private ThreadPoolGrpc threadPoolGrpc; + + @Data + @Configuration + public 
class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int queue; + } + + @Value("${auth.secret-key: 'FXQXbJtbCLxODc6tGci732pkH1cyf8Qg'}") + private String secretKey; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + @Autowired + private Raft raft; + @Autowired + private Store store; + @Autowired + private Partition partition; + @Autowired + private Discovery discovery; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + private Map initialStoreMap = null; + private ConfigService configService; + private IdService idService; +======== + + private volatile Map initialStoreMap = null; + private volatile Map initialStoreGroupMap = null; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + + public Map getInitialStoreMap() { + if (initialStoreMap == null) { + initialStoreMap = new ConcurrentHashMap<>(); + Arrays.asList(initialStoreList.split(",")).forEach(s -> { + String[] arr = s.split("/"); + initialStoreMap.put(arr[0], arr[0]); + }); + } + return initialStoreMap; + } + + public int getInitialStoreGroup(String address) { + if (initialStoreGroupMap == null) { + synchronized (this) { + if (initialStoreGroupMap == null) { + initialStoreGroupMap = new ConcurrentHashMap<>(); + Arrays.asList(initialStoreList.split(",")).forEach(s -> { + String[] arr = s.split("/"); + if (arr.length == 2) { + initialStoreGroupMap.put(arr[0], Integer.parseInt(arr[1])); + } else { + initialStoreGroupMap.put(arr[0], DEFAULT_STORE_GROUP_ID); + } + }); + } + } + } + return initialStoreGroupMap.getOrDefault(address, DEFAULT_STORE_GROUP_ID); + } + + /** + * The initial number of partitions + * Number of Stores * Maximum number of replicas per Store / Number of replicas 
per partition + * + * @return + */ + public int getInitialPartitionCount() { + return getInitialStoreMap().size() * partition.getMaxShardsPerStore() + / partition.getShardCount(); + } + + public ConfigService getConfigService() { + return configService; + } + + public void setConfigService(ConfigService configService) { + this.configService = configService; + } + + public IdService getIdService() { + return idService; + } + + public void setIdService(IdService idService) { + this.idService = idService; + } + + @Data + @Configuration + public class ThreadPoolGrpc { + + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int queue; + } + + @Data + @Configuration + public class Raft { + + @Value("${raft.enable:true }") + private boolean enable; + @Value("${raft.address}") + private String address; + @Value("${pd.data-path}") + private String dataPath; + @Value("${raft.peers-list}") + private String peersList; + @Value("${raft.snapshotInterval: 300}") + private int snapshotInterval; + @Value("${raft.rpc-timeout:10000}") + private int rpcTimeout; + @Value("${grpc.host}") + private String host; + @Value("${server.port}") + private int port; + + @Value("${pd.cluster_id:1}") + private long clusterId; + @Value("${grpc.port}") + private int grpcPort; + + public String getGrpcAddress() { + return host + ":" + grpcPort; + } + } + + @Data + @Configuration + public class Store { + + // store Heartbeat timeout + @Value("${store.keepAlive-timeout:300}") + private long keepAliveTimeout = 300; + @Value("${store.max-down-time:1800}") + private long maxDownTime = 1800; + + @Value("${store.monitor_data_enabled:false}") + private boolean monitorDataEnabled = false; + + @Value("${store.monitor_data_interval: 1 minute}") + private String monitorDataInterval = "1 minute"; + + @Value("${store.monitor_data_retention: 1 day}") + private String 
monitorDataRetention = "1 day"; + + /** + * interval -> seconds. + * minimum value is 1 seconds. + * + * @return the seconds of the interval + */ + public Long getMonitorInterval() { + return parseTimeExpression(this.monitorDataInterval); + } + + /** + * the monitor data that saved in rocksdb, will be deleted + * out of period + * + * @return the period of the monitor data should keep + */ + public Long getRetentionPeriod() { + return parseTimeExpression(this.monitorDataRetention); + } + + /** + * parse time expression , support pattern: + * [1-9][ ](second, minute, hour, day, month, year) + * unit could not be null, the number part is 1 by default. + * + * @param exp + * @return seconds value of the expression. 1 will return by illegal expression + */ + private Long parseTimeExpression(String exp) { + if (exp != null) { + Pattern pattern = Pattern.compile( + "(?(\\d+)*)(\\s)*(?(second|minute|hour|day|month|year)$)"); + Matcher matcher = pattern.matcher(exp.trim()); + if (matcher.find()) { + String n = matcher.group("n"); + String unit = matcher.group("unit"); + + if (null == n || n.length() == 0) { + n = "1"; + } + + Long interval; + switch (unit) { + case "minute": + interval = 60L; + break; + case "hour": + interval = 3600L; + break; + case "day": + interval = 86400L; + break; + case "month": + interval = 86400L * 30; + break; + case "year": + interval = 86400L * 365; + break; + case "second": + default: + interval = 1L; + } + // avoid n == '0' + return Math.max(1L, interval * Integer.parseInt(n)); + } + } + return 1L; + } + + } + + @Data + @Configuration +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + public class Partition { + + private int totalCount = 0; +======== + public class Partition{ +// private int totalCount = 0; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + + // Maximum number of replicas per Store + 
@Value("${partition.store-max-shard-count:24}") + private int maxShardsPerStore = 24; + + @Value("${partition.default-shard-count:3}") + private int shardCount = 3; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + public int getTotalCount() { + if (totalCount == 0) { + totalCount = getInitialPartitionCount(); + } + return totalCount; + } + + public void setTotalCount(int totalCount) { + this.totalCount = totalCount; + } +======== +// public void setTotalCount(int totalCount){ +// this.totalCount = totalCount; +// } +// +// public int getTotalCount() { +// if ( totalCount == 0 ) { +// totalCount = getInitialPartitionCount(); +// } +// return totalCount; +// } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + } + + @Data + @Configuration + public class Discovery { + + // After the client registers, the maximum number of heartbeats is not reached, and after + // that, the previous registration information will be deleted + @Value("${discovery.heartbeat-try-count:3}") + private int heartbeatOutTimes = 3; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +======== + @Data + @Configuration + public class JobConfig { + @Value("${job.interruptableThreadPool.core:0}") + private int core; + @Value("${job.interruptableThreadPool.max:256}") + private int max; + @Value("${job.interruptableThreadPool.queue:" + Integer.MAX_VALUE + "}") + private int queueSize; + @Value("${job.start-time:19}") + private int startTime; + @Value("${job.uninterruptibleThreadPool.core:0}") + private int uninterruptibleCore; + @Value("${job.uninterruptibleThreadPool.max:256}") + private int uninterruptibleMax; + @Value("${job.uninterruptibleThreadPool.queue:" + Integer.MAX_VALUE + "}") + private int uninterruptibleQueueSize; + } + + + @Data + @Configuration + @ConfigurationProperties(prefix = "pd") + public class Servers { + List 
servers; + + public List getServers() { + if (CollectionUtils.isEmpty(servers)) { + return defaultServers; + } + return servers; + } + } + + @Value("${pd.allows-address-acquisition: false}") + private boolean allowsAddressAcquisition = false; + + @Getter + private ConfigService configService; + + @Getter + private IdService idService; + + public void setConfigService(ConfigService configService) { + this.configService = configService; + } + + public void setIdService(IdService idService) { + this.idService = idService; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/Server.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/Server.java new file mode 100644 index 0000000000..64432d59d6 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/Server.java @@ -0,0 +1,21 @@ +package org.apache.hugegraph.pd.config; + +import org.springframework.context.annotation.Configuration; + +import lombok.AllArgsConstructor; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * @author zhangyingjie + * @date 2024/4/2 + **/ +@Data +@Configuration +@AllArgsConstructor +@NoArgsConstructor +public class Server { + String server; + String token; + String pwd; +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/consts/PoolNames.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/consts/PoolNames.java new file mode 100644 index 0000000000..8f4befb768 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/consts/PoolNames.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.consts; + +/** + * @author zhangyingjie + * @date 2023/10/30 + **/ +public class PoolNames { + + public static final String GRPC = "hg-grpc"; + public static final String SCAN = "hg-scan"; + public static final String I_JOB = "hg-i-job"; + public static final String U_JOB = "hg-u-job"; + public static final 
String COMPACT = "hg-compact"; + public static final String HEARTBEAT = "hg-heartbeat"; + public static final String SHUTDOWN = "hg-shutdown"; +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionInstructionListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionInstructionListener.java new file mode 100644 index 0000000000..17781f8c04 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionInstructionListener.java @@ -0,0 +1,31 @@ +package org.apache.hugegraph.pd.listener; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; + +/** + * 分区命令监听 + */ +public interface PartitionInstructionListener { + void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException; + + void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException; + + void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException; + + void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException; + + void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException; + + void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException; + + void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) throws PDException; + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionStatusListener.java 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/PartitionStatusListener.java
package org.apache.hugegraph.pd.listener;

import org.apache.hugegraph.pd.grpc.Metapb;

/**
 * Listener for partition status changes.
 */
public interface PartitionStatusListener {

    void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition);

    void onPartitionRemoved(Metapb.Partition partition);
}
diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/ShardGroupStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/ShardGroupStatusListener.java
package org.apache.hugegraph.pd.listener;

import org.apache.hugegraph.pd.grpc.Metapb;

/**
 * Listener for shard-group membership changes.
 */
public interface ShardGroupStatusListener {

    void onShardListChanged(Metapb.ShardGroup shardGroup, Metapb.ShardGroup newShardGroup);

    void onShardListOp(Metapb.ShardGroup shardGroup);
}
diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/StoreStatusListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/listener/StoreStatusListener.java
package org.apache.hugegraph.pd.listener;

import org.apache.hugegraph.pd.grpc.Metapb;

/**
 * Listener for store status, graph state and store raft changes.
 */
public interface StoreStatusListener {

    void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old,
                              Metapb.StoreState status);

    void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld,
                       Metapb.GraphState stateNew);

    void onStoreRaftChanged(Metapb.Store store);
}
diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.pd.meta;

import java.util.List;
import java.util.Optional;

import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.grpc.Metapb;

/**
 * RocksDB-backed store for PD configuration metadata: graph spaces, PD config
 * versions and (4.x) store groups.
 * NOTE(review): this file carried unresolved merge conflict markers; the
 * resolution keeps the HEAD-side class declaration and license plus the
 * 4.x store-group methods.
 */
public class ConfigMetaStore extends MetadataRocksDBStore {

    private final long clusterId;

    public ConfigMetaStore(PDConfig pdConfig) {
        super(pdConfig);
        this.clusterId = pdConfig.getClusterId();
    }

    /**
     * Update the storage status of the graph space, stamping the current time.
     */
    public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException {
        byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace.getName());
        graphSpace = graphSpace.toBuilder().setTimestamp(System.currentTimeMillis()).build();
        put(graphSpaceKey, graphSpace.toByteArray());
        return graphSpace;
    }

    public List<Metapb.GraphSpace> getGraphSpace(String graphSpace) throws PDException {
        byte[] graphSpaceKey = MetadataKeyHelper.getGraphSpaceKey(graphSpace);
        return scanPrefix(Metapb.GraphSpace.parser(), graphSpaceKey);
    }

    public Metapb.PDConfig setPdConfig(Metapb.PDConfig pdConfig) throws PDException {
        byte[] configKey =
                MetadataKeyHelper.getPdConfigKey(String.valueOf(pdConfig.getVersion()));
        Metapb.PDConfig config = Metapb.PDConfig.newBuilder(pdConfig)
                                                .setTimestamp(System.currentTimeMillis())
                                                .build();
        put(configKey, config.toByteArray());
        return config;
    }

    /**
     * Read the PD config at the given version; version <= 0 means "latest"
     * (scan all versions and return the highest).
     */
    public Metapb.PDConfig getPdConfig(long version) throws PDException {
        byte[] configKey = MetadataKeyHelper.getPdConfigKey(version <= 0 ? null :
                                                            String.valueOf(version));
        Optional<Metapb.PDConfig> max = scanPrefix(
                Metapb.PDConfig.parser(), configKey).stream().max(
                (o1, o2) -> (o1.getVersion() > o2.getVersion()) ? 1 : -1);
        return max.orElse(null);
    }

    public Metapb.StoreGroup saveStoreGroup(Metapb.StoreGroup storeGroup) throws PDException {
        byte[] storeGroupKey = MetadataKeyHelper.getStoreGroupKey(storeGroup.getGroupId());
        put(storeGroupKey, storeGroup.toByteArray());
        return storeGroup;
    }

    public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException {
        byte[] storeGroupKey = MetadataKeyHelper.getStoreGroupKey(groupId);
        return getOne(Metapb.StoreGroup.parser(), storeGroupKey);
    }

    public List<Metapb.StoreGroup> getStoreGroups() throws PDException {
        byte[] storeGroupPrefix = MetadataKeyHelper.getStoreGroupPrefix();
        return scanPrefix(Metapb.StoreGroup.parser(), storeGroupPrefix);
    }
}
diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java new file mode 100644 index 0000000000..7a0b6f8585 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -0,0 +1,119 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership.
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.pd.meta;

import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang3.StringUtils;
import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.common.Useless;
import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
import org.apache.hugegraph.pd.grpc.discovery.Query;

import lombok.extern.slf4j.Slf4j;

/**
 * TTL-backed registry for service discovery.
 * Key layout: appName --> address --> registryInfo.
 */
@Useless("discovery related")
@Slf4j
public class DiscoveryMetaStore extends MetadataRocksDBStore {

    private static final String PREFIX = "REGIS-";
    private static final String SPLITTER = "-";

    public DiscoveryMetaStore(PDConfig pdConfig) {
        super(pdConfig);
    }

    /**
     * Register a node; the entry expires after {@code outTimes} missed
     * heartbeat intervals (interval is carried in milliseconds).
     */
    public void register(NodeInfo nodeInfo, int outTimes) throws PDException {
        putWithTTL(toKey(nodeInfo.getAppName(), nodeInfo.getVersion(), nodeInfo.getAddress()),
                   nodeInfo.toByteArray(), (nodeInfo.getInterval() / 1000) * outTimes);
    }

    // Full key: REGIS-{app}-{version}-{address}
    byte[] toKey(String appName, String version, String address) {
        StringBuilder builder = getPrefixBuilder(appName, version);
        builder.append(SPLITTER);
        builder.append(address);
        return builder.toString().getBytes();
    }

    // Prefix: REGIS-[{app}-[{version}]]; omitted parts widen the scan.
    private StringBuilder getPrefixBuilder(String appName, String version) {
        StringBuilder builder = new StringBuilder();
        builder.append(PREFIX);
        if (!StringUtils.isEmpty(appName)) {
            builder.append(appName);
            builder.append(SPLITTER);
        }
        if (!StringUtils.isEmpty(version)) {
            builder.append(version);
        }
        return builder;
    }

    /**
     * Query live (non-expired) nodes, optionally filtered by the query's
     * label map. Returns an empty result when the store scan fails.
     */
    public NodeInfos getNodes(Query query) {
        List<NodeInfo> nodeInfos = null;
        try {
            StringBuilder builder = getPrefixBuilder(query.getAppName(), query.getVersion());
            nodeInfos = getInstanceListWithTTL(NodeInfo.parser(),
                                               builder.toString().getBytes());
            builder.setLength(0);
        } catch (PDException e) {
            // Pass the throwable as the last arg (no placeholder) so the stack
            // trace is logged; the original "{}" form printed only the message.
            log.error("An error occurred getting data from the store", e);
        }
        if (nodeInfos == null) {
            // Scan failed: the original dereferenced null here and threw NPE.
            return NodeInfos.newBuilder().build();
        }
        if (!query.getLabelsMap().isEmpty()) {
            List<NodeInfo> result = new LinkedList<>();
            for (NodeInfo node : nodeInfos) {
                if (labelMatch(node, query)) {
                    result.add(node);
                }
            }
            return NodeInfos.newBuilder().addAllInfo(result).build();
        }
        return NodeInfos.newBuilder().addAllInfo(nodeInfos).build();
    }

    // A node matches only if every label in the query equals the node's label.
    private boolean labelMatch(NodeInfo node, Query query) {
        Map<String, String> labelsMap = node.getLabelsMap();
        for (Map.Entry<String, String> entry : query.getLabelsMap().entrySet()) {
            if (!entry.getValue().equals(labelsMap.get(entry.getKey()))) {
                return false;
            }
        }
        return true;
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.pd.meta;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.store.KV;

import com.caucho.hessian.io.Hessian2Input;
import com.caucho.hessian.io.Hessian2Output;

import lombok.extern.slf4j.Slf4j;

/**
 * Implementation class for auto-increment IDs and cyclic IDs (cid),
 * persisted in the metadata store.
 */
@Slf4j
public class IdMetaStore extends MetadataRocksDBStore {

    private static final String ID_PREFIX = "@ID@";
    private static final String CID_PREFIX = "@CID@";
    private static final String CID_SLOT_PREFIX = "@CID_SLOT@";
    private static final String CID_DEL_SLOT_PREFIX = "@CID_DEL_SLOT@";
    private static final String SEPARATOR = "@";
    // Per-key lock objects so concurrent allocations of different keys don't contend.
    private static final ConcurrentHashMap<String, Object> SEQUENCES = new ConcurrentHashMap<>();
    // Deleted cids are kept reusable-by-name for 24h (see getCId(key, name, max)).
    private static final long CID_DEL_TIMEOUT = 24 * 3600 * 1000;
    private final long clusterId;

    public IdMetaStore(PDConfig pdConfig) {
        super(pdConfig);
        this.clusterId = pdConfig.getClusterId();
    }

    public static long bytesToLong(byte[] b) {
        ByteBuffer buf = ByteBuffer.wrap(b);
        return buf.getLong();
    }

    public static byte[] longToBytes(long l) {
        // Long.BYTES (8): the original used Long.SIZE (64, the *bit* count),
        // producing 64-byte values padded with 56 trailing zeros.
        ByteBuffer buf = ByteBuffer.wrap(new byte[Long.BYTES]);
        buf.putLong(l);
        buf.flip();
        return buf.array();
    }

    /**
     * Get auto-increment ID: returns the current value and advances the
     * stored counter by {@code delta}.
     *
     * @param key   counter name
     * @param delta increment step
     * @return the value before incrementing
     * @throws PDException on store failure
     */
    public long getId(String key, int delta) throws PDException {
        Object probableLock = getLock(key);
        byte[] keyBs = (ID_PREFIX + key).getBytes(Charset.defaultCharset());
        synchronized (probableLock) {
            byte[] bs = getOne(keyBs);
            long current = bs != null ? bytesToLong(bs) : 0L;
            long next = current + delta;
            put(keyBs, longToBytes(next));
            return current;
        }
    }

    // Lazily create (or reuse) the lock object associated with a key.
    private Object getLock(String key) {
        Object probableLock = new Object();
        Object currentLock = SEQUENCES.putIfAbsent(key, probableLock);
        if (currentLock != null) {
            probableLock = currentLock;
        }
        return probableLock;
    }

    /** Reset the auto-increment counter for {@code key} back to zero. */
    public void resetId(String key) throws PDException {
        // Reuse getLock instead of duplicating the putIfAbsent dance inline.
        Object probableLock = getLock(key);
        byte[] keyBs = (ID_PREFIX + key).getBytes(Charset.defaultCharset());
        synchronized (probableLock) {
            removeByPrefix(keyBs);
        }
    }

    /**
     * Within 24 hours of deleting the cid identified by the name,
     * repeat applying for the same name's cid to keep the same value.
     * This design is to prevent inconsistent caching, causing data errors.
     *
     * @param key  cid namespace
     * @param name cid identifier
     * @param max  upper bound of the cyclic range
     * @return the allocated cid, or -1 when the range is exhausted
     * @throws PDException on store failure
     */
    public long getCId(String key, String name, long max) throws PDException {
        // Check for expired cids to delete. The frequency of deleting graphs is relatively low,
        // so this has little performance impact.
        byte[] delKeyPrefix = (CID_DEL_SLOT_PREFIX +
                               key + SEPARATOR).getBytes(Charset.defaultCharset());
        synchronized (this) {
            scanPrefix(delKeyPrefix).forEach(kv -> {
                long[] value = (long[]) deserialize(kv.getValue());
                if (value.length >= 2) {
                    if (System.currentTimeMillis() - value[1] > CID_DEL_TIMEOUT) {
                        try {
                            delCId(key, value[0]);
                            remove(kv.getKey());
                        } catch (Exception e) {
                            log.error("Exception ", e);
                        }
                    }
                }
            });

            // Restore key from delayed deletion queue
            byte[] cidDelayKey = getCIDDelayKey(key, name);
            byte[] value = getOne(cidDelayKey);
            if (value != null) {
                // Remove from delayed deletion queue
                remove(cidDelayKey);
                return ((long[]) deserialize(value))[0];
            } else {
                return getCId(key, max);
            }
        }
    }

    /**
     * Add to the deletion queue for delayed deletion.
     */
    public long delCIdDelay(String key, String name, long cid) throws PDException {
        byte[] delKey = getCIDDelayKey(key, name);
        put(delKey, serialize(new long[]{cid, System.currentTimeMillis()}));
        return cid;
    }

    /**
     * Get an auto-incrementing cyclic non-repeating ID. When the upper limit
     * is reached, it starts from 0 again.
     *
     * @param key cid namespace
     * @param max the upper limit of the ID; after reaching this value, it
     *            starts incrementing from 0 again
     * @return the allocated cid, or -1 when every slot is occupied
     * @throws PDException on store failure
     */
    public long getCId(String key, long max) throws PDException {
        Object probableLock = getLock(key);
        byte[] keyBs = (CID_PREFIX + key).getBytes(Charset.defaultCharset());
        synchronized (probableLock) {
            byte[] bs = getOne(keyBs);
            long current = bs != null ? bytesToLong(bs) : 0L;
            long last = current == 0 ? max - 1 : current - 1;
            { // Find an unused cid
                List<KV> kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, max));
                for (KV kv : kvs) {
                    if (current == bytesToLong(kv.getValue())) {
                        current++;
                    } else {
                        break;
                    }
                }
            }
            if (current == max) {
                // Wrapped: continue the search from 0 up to the slot before 'last'.
                current = 0;
                List<KV> kvs = scanRange(genCIDSlotKey(key, current), genCIDSlotKey(key, last));
                for (KV kv : kvs) {
                    if (current == bytesToLong(kv.getValue())) {
                        current++;
                    } else {
                        break;
                    }
                }
            }
            if (current == last) {
                return -1;
            }
            put(genCIDSlotKey(key, current), longToBytes(current));
            put(keyBs, longToBytes(current + 1));
            return current;
        }
    }

    // Slot key: @CID_SLOT@{key}@ followed by the big-endian 8-byte value,
    // so a range scan walks slots in numeric order.
    private byte[] genCIDSlotKey(String key, long value) {
        byte[] keySlot = (CID_SLOT_PREFIX + key + SEPARATOR).getBytes(Charset.defaultCharset());
        ByteBuffer buf = ByteBuffer.allocate(keySlot.length + Long.BYTES);
        buf.put(keySlot);
        buf.put(longToBytes(value));
        return buf.array();
    }

    private byte[] getCIDDelayKey(String key, String name) {
        return (CID_DEL_SLOT_PREFIX +
                key + SEPARATOR +
                name).getBytes(Charset.defaultCharset());
    }

    /**
     * Delete a cyclic ID and release its value.
     *
     * @param key cid namespace
     * @param cid value to free
     * @return result of the underlying remove
     * @throws PDException on store failure
     */
    public long delCId(String key, long cid) throws PDException {
        return remove(genCIDSlotKey(key, cid));
    }

    // Hessian-serialize an object (used for the long[]{cid, deleteTime} records).
    private byte[] serialize(Object obj) {
        try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) {
            Hessian2Output output = new Hessian2Output(bos);
            output.writeObject(obj);
            output.flush();
            return bos.toByteArray();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    private Object deserialize(byte[] bytes) {
        try (ByteArrayInputStream bis = new ByteArrayInputStream(bytes)) {
            Hessian2Input input = new Hessian2Input(bis);
            Object obj = input.readObject();
            input.close();
            return obj;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.pd.meta;

import java.util.List;

import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.config.PDConfig;
import org.apache.hugegraph.pd.grpc.Metapb;

/**
 * Metadata store for PD operation log records, keyed by action and timestamp.
 */
public class LogMeta extends MetadataRocksDBStore {

    private final PDConfig pdConfig;

    public LogMeta(PDConfig pdConfig) {
        super(pdConfig);
        this.pdConfig = pdConfig;
    }

    /** Persist one log record under LOG_RECORD/{action}/{timestamp}. */
    public void insertLog(Metapb.LogRecord record) throws PDException {
        byte[] storeLogKey = MetadataKeyHelper.getLogKey(record);
        put(storeLogKey, record.toByteArray());
    }

    /**
     * Read all records for {@code action} with timestamps in [start, end).
     *
     * @param action action name used in the key prefix
     * @param start  inclusive lower timestamp bound
     * @param end    exclusive upper timestamp bound
     */
    public List<Metapb.LogRecord> getLog(String action, Long start, Long end) throws PDException {
        byte[] keyStart = MetadataKeyHelper.getLogKeyPrefix(action, start);
        byte[] keyEnd = MetadataKeyHelper.getLogKeyPrefix(action, end);
        return this.scanRange(Metapb.LogRecord.parser(), keyStart, keyEnd);
    }
}
+ new RaftKVStore(RaftEngine.getInstance(), proto) : + proto; + store.init(pdConfig); + } + } + } + return store; + } + + public static void closeStore(){ + if ( store != null ) + store.close(); + } + + public static StoreInfoMeta newStoreInfoMeta(PDConfig pdConfig) { + return new StoreInfoMeta(pdConfig); + } + + public static PartitionMeta newPartitionMeta(PDConfig pdConfig) { + return new PartitionMeta(pdConfig); + } + public static IdMetaStore newHugeServerMeta(PDConfig pdConfig) { + return new IdMetaStore(pdConfig); + } + public static DiscoveryMetaStore newDiscoveryMeta(PDConfig pdConfig) { + return new DiscoveryMetaStore(pdConfig); + } + public static ConfigMetaStore newConfigMeta(PDConfig pdConfig) { + return new ConfigMetaStore(pdConfig); + } + public static TaskInfoMeta newTaskInfoMeta(PDConfig pdConfig) { return new TaskInfoMeta(pdConfig);} + + + public static PulseStore newPulseStore(PDConfig pdConfig) { + return new PulseStore(pdConfig); + } + + public static LogMeta newLogMeta(PDConfig pdConfig) { + return new LogMeta(pdConfig); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java new file mode 100644 index 0000000000..42060a622e --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java @@ -0,0 +1,522 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.pd.meta;

import java.nio.charset.Charset;

import org.apache.commons.lang3.StringUtils;
import org.apache.hugegraph.pd.grpc.Metapb;

/**
 * Builds the key layout used by the PD metadata stores. Every method returns
 * the encoded bytes of one '/'-delimited key or key prefix.
 * Merge resolution: union of both sides — HEAD's getPartitionStatusKey (the
 * variant that includes graphName, matching getPartitionStatusPrefixKey) plus
 * the 4.x-only notice/observer, user-task and store-group helpers.
 */
public class MetadataKeyHelper {

    public static final char DELIMITER = '/';

    private static final String STORE = "STORE";
    private static final String ACTIVESTORE = "ACTIVESTORE";
    private static final String STORESTATUS = "STORESTATUS";
    private static final String PARTITION = "PARTITION";
    private static final String PARTITION_V36 = "PARTITION_V36";
    private static final String SHARDGROUP = "SHARDGROUP";

    private static final String PARTITION_STATUS = "PARTITION_STATUS";
    private static final String GRAPH = "GRAPH";
    private static final String GRAPHMETA = "GRAPHMETA";
    private static final String GRAPH_SPACE = "GRAPH_SPACE";
    private static final String PD_CONFIG = "PD_CONFIG";
    private static final String TASK_SPLIT = "TASK_SPLIT";
    private static final String TASK_MOVE = "TASK_MOVE";
    private static final String TASK_USER = "TASK_USER";
    private static final String LOG_RECORD = "LOG_RECORD";

    private static final String QUEUE = "QUEUE";
    private static final String OBSERVER_NOTICE = "OB_N";
    private static final String NOTICE_CONTENT = "NOTICE_C";
    private static final String STORE_GROUP = "STORE_GROUP";

    private static final String STORE_GROUP_RELATION = "STORE_GROUP_RELATION";

    public static byte[] getStoreInfoKey(final long storeId) {
        // STORE/{storeId}
        String key = StringBuilderHelper.get()
                                        .append(STORE).append(DELIMITER)
                                        .append(storeId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getActiveStoreKey(final long storeId) {
        // ACTIVESTORE/{storeId}
        String key = StringBuilderHelper.get()
                                        .append(ACTIVESTORE).append(DELIMITER)
                                        .append(storeId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getActiveStorePrefix() {
        // ACTIVESTORE/
        String key = StringBuilderHelper.get()
                                        .append(ACTIVESTORE).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getStorePrefix() {
        // STORE/
        String key = StringBuilderHelper.get()
                                        .append(STORE).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getStoreStatusKey(final long storeId) {
        // STORESTATUS/{storeId}
        String key = StringBuilderHelper.get()
                                        .append(STORESTATUS).append(DELIMITER)
                                        .append(storeId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getShardGroupKey(final long groupId) {
        // SHARDGROUP/{groupId}
        String key = StringBuilderHelper.get()
                                        .append(SHARDGROUP).append(DELIMITER)
                                        .append(groupId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getShardGroupPrefix() {
        // SHARDGROUP/
        String key = StringBuilderHelper.get()
                                        .append(SHARDGROUP).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getPartitionKey(final String graphName, final int partId) {
        // GRAPH/{graphName}/PARTITION/{partId}
        String key = StringBuilderHelper.get()
                                        .append(GRAPH).append(DELIMITER)
                                        .append(graphName).append(DELIMITER)
                                        .append(PARTITION).append(DELIMITER)
                                        .append(partId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getPartitionV36Key(final String graphName, final int partId) {
        // GRAPH/{graphName}/PARTITION_V36/{partId}
        String key = StringBuilderHelper.get()
                                        .append(GRAPH).append(DELIMITER)
                                        .append(graphName).append(DELIMITER)
                                        .append(PARTITION_V36).append(DELIMITER)
                                        .append(partId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getPartitionPrefix(final String graphName) {
        // GRAPH/{graphName}/PARTITION/
        String key = StringBuilderHelper.get()
                                        .append(GRAPH).append(DELIMITER)
                                        .append(graphName).append(DELIMITER)
                                        .append(PARTITION).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getShardKey(final long storeId, final int partId) {
        // SHARDGROUP/{storeId}/{partId}
        String key = StringBuilderHelper.get()
                                        .append(SHARDGROUP).append(DELIMITER)
                                        .append(storeId).append(DELIMITER)
                                        .append(partId)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getShardPrefix(final long storeId) {
        // SHARDGROUP/{storeId}/
        String key = StringBuilderHelper.get()
                                        .append(SHARDGROUP).append(DELIMITER)
                                        .append(storeId).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getGraphKey(final String graphName) {
        // GRAPHMETA/{graphName}/
        String key = StringBuilderHelper.get()
                                        .append(GRAPHMETA).append(DELIMITER)
                                        .append(graphName).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getGraphPrefix() {
        // GRAPHMETA/
        String key = StringBuilderHelper.get()
                                        .append(GRAPHMETA).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getPartitionStatusKey(String graphName, int id) {
        // PARTITION_STATUS/{graphName}/{id}/
        // HEAD variant kept: includes graphName, consistent with
        // getPartitionStatusPrefixKey below which also appends graphName.
        String key = StringBuilderHelper.get()
                                        .append(PARTITION_STATUS)
                                        .append(DELIMITER)
                                        .append(graphName).append(DELIMITER)
                                        .append(id).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getPartitionStatusPrefixKey(String graphName) {
        // PARTITION_STATUS/[{graphName}/]
        StringBuilder builder = StringBuilderHelper.get().append(PARTITION_STATUS)
                                                   .append(DELIMITER);
        if (!StringUtils.isEmpty(graphName)) {
            builder.append(graphName).append(DELIMITER);
        }
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getGraphSpaceKey(String graphSpace) {
        // GRAPH_SPACE/[{graphSpace}/]
        StringBuilder builder = StringBuilderHelper.get().append(GRAPH_SPACE)
                                                   .append(DELIMITER);
        if (!StringUtils.isEmpty(graphSpace)) {
            builder.append(graphSpace).append(DELIMITER);
        }
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getPdConfigKey(String configKey) {
        // PD_CONFIG/[{configKey}/]
        StringBuilder builder = StringBuilderHelper.get().append(PD_CONFIG)
                                                   .append(DELIMITER);
        if (!StringUtils.isEmpty(configKey)) {
            builder.append(configKey).append(DELIMITER);
        }
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getQueueItemPrefix() {
        // QUEUE/
        String key = StringBuilderHelper.get()
                                        .append(QUEUE).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getQueueItemKey(String itemId) {
        // QUEUE/[{itemId}/]
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(QUEUE).append(DELIMITER);
        if (!StringUtils.isEmpty(itemId)) {
            builder.append(itemId).append(DELIMITER);
        }
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getNoticeContentKey(long noticeId) {
        // NOTICE_C/{noticeId}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(NOTICE_CONTENT).append(DELIMITER)
                                                   .append(noticeId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getObserverNoticePrefix() {
        // OB_N/
        String key = StringBuilderHelper.get()
                                        .append(OBSERVER_NOTICE).append(DELIMITER)
                                        .toString();
        return key.getBytes(Charset.defaultCharset());
    }

    public static byte[] getObserverNoticeKey(long observerId, long noticeId) {
        // OB_N/{observerId}/{noticeId}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(OBSERVER_NOTICE).append(DELIMITER)
                                                   .append(observerId)
                                                   .append(DELIMITER).append(noticeId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getSplitTaskKey(String graphName, int groupId) {
        // TASK_SPLIT/{graphName}/{partitionId}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_SPLIT).append(DELIMITER)
                                                   .append(graphName).append(DELIMITER)
                                                   .append(groupId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getSplitTaskPrefix(String graphName) {
        // TASK_SPLIT/{graphName}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_SPLIT).append(DELIMITER)
                                                   .append(graphName);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getAllSplitTaskPrefix() {
        // TASK_SPLIT/
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_SPLIT).append(DELIMITER);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getMoveTaskKey(String graphName, int targetGroupId, int groupId) {
        // TASK_MOVE/{graphName}/{targetPartitionId}/{sourcePartitionId}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_MOVE).append(DELIMITER)
                                                   .append(graphName).append(DELIMITER)
                                                   .append(targetGroupId).append(DELIMITER)
                                                   .append(groupId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getMoveTaskPrefix(String graphName) {
        // TASK_MOVE/{graphName}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_MOVE).append(DELIMITER)
                                                   .append(graphName);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getUserTaskKey(long taskId, int partitionId) {
        // TASK_USER/{taskId}/{partitionId}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_USER).append(DELIMITER)
                                                   .append(taskId).append(DELIMITER)
                                                   .append(partitionId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getUserTaskPrefix(long taskId) {
        // TASK_USER/{taskId}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_USER).append(DELIMITER)
                                                   .append(taskId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getAllMoveTaskPrefix() {
        // TASK_MOVE/
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(TASK_MOVE).append(DELIMITER);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getLogKey(Metapb.LogRecord record) {
        // LOG_RECORD/{action}/{time}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(LOG_RECORD)
                                                   .append(DELIMITER)
                                                   .append(record.getAction())
                                                   .append(DELIMITER)
                                                   .append(record.getTimestamp());
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getLogKeyPrefix(String action, long time) {
        // LOG_RECORD/{action}/{time}
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(LOG_RECORD)
                                                   .append(DELIMITER)
                                                   .append(action)
                                                   .append(DELIMITER)
                                                   .append(time);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getKVPrefix(String prefix, String key) {
        // {prefix}/[{key}/]
        StringBuilder builder = StringBuilderHelper.get()
                                                   .append(prefix).append(DELIMITER);
        if (!StringUtils.isEmpty(key)) {
            builder.append(key).append(DELIMITER);
        }
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getKVTTLPrefix(String ttlPrefix, String prefix, String key) {
        // {ttlPrefix}{prefix}/[{key}/]
        StringBuilder builder = StringBuilderHelper.get().append(ttlPrefix)
                                                   .append(prefix).append(DELIMITER);
        if (!StringUtils.isEmpty(key)) {
            builder.append(key).append(DELIMITER);
        }
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static String getKVWatchKeyPrefix(String key, String watchDelimiter, long clientId) {
        // {watchDelimiter}/{key}/{clientId}
        StringBuilder builder = StringBuilderHelper.get();
        builder.append(watchDelimiter).append(DELIMITER);
        builder.append(key == null ? "" : key).append(DELIMITER);
        builder.append(clientId);
        return builder.toString();
    }

    public static String getKVWatchKeyPrefix(String key, String watchDelimiter) {
        // {watchDelimiter}/{key}/
        StringBuilder builder = StringBuilderHelper.get();
        builder.append(watchDelimiter).append(DELIMITER);
        builder.append(key == null ? "" : key).append(DELIMITER);
        return builder.toString();
    }

    public static byte[] getStoreGroupRelationKey(Long storeId) {
        // STORE_GROUP_RELATION/{storeId}
        StringBuilder builder = StringBuilderHelper.get();
        builder.append(STORE_GROUP_RELATION).append(DELIMITER).append(storeId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getStoreGroupRelationPrefix() {
        // STORE_GROUP_RELATION/
        StringBuilder builder = StringBuilderHelper.get();
        builder.append(STORE_GROUP_RELATION).append(DELIMITER);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getStoreGroupKey(int groupId) {
        // STORE_GROUP/{groupId}
        StringBuilder builder = StringBuilderHelper.get();
        builder.append(STORE_GROUP).append(DELIMITER).append(groupId);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static byte[] getStoreGroupPrefix() {
        // STORE_GROUP/
        StringBuilder builder = StringBuilderHelper.get();
        builder.append(STORE_GROUP).append(DELIMITER);
        return builder.toString().getBytes(Charset.defaultCharset());
    }

    public static char getDelimiter() {
        return DELIMITER;
    }

    public static StringBuilder getStringBuilderHelper() {
        return StringBuilderHelper.get();
    }

    /**
     * Per-thread reusable StringBuilder so key building does not allocate a
     * fresh builder for every call.
     */
    static class StringBuilderHelper {

        private static final int DISCARD_LIMIT = 1024 << 3;     // 8k

        private static final ThreadLocal<StringBuilderHolder> holderThreadLocal =
                ThreadLocal.withInitial(StringBuilderHolder::new);

        public static StringBuilder get() {
            final StringBuilderHolder holder = holderThreadLocal.get();
            return holder.getStringBuilder();
        }

        public static void truncate() {
            final StringBuilderHolder holder = holderThreadLocal.get();
            holder.truncate();
        }

        private static class StringBuilderHolder {

            private final StringBuilder buf = new StringBuilder();

            // NOTE(review): the tail of this nested class was truncated in the
            // broken merge patch; reconstructed per the conventional
            // thread-local holder pattern — confirm against upstream.
            private StringBuilder getStringBuilder() {
                truncate();
                return buf;
            }

            private void truncate() {
                buf.setLength(0);
                if (buf.capacity() > DISCARD_LIMIT) {
                    // Do not cache oversized buffers across calls.
                    buf.trimToSize();
                }
            }
        }
    }
}
+ + private StringBuilder getStringBuilder() { + truncate(); + return buf; + } + + private void truncate() { + buf.setLength(0); + } + } + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java new file mode 100644 index 0000000000..38502e718c --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -0,0 +1,215 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.KV; +import com.google.protobuf.Parser; +import org.apache.commons.lang3.ArrayUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.KV; + +import com.google.protobuf.Parser; + +public class MetadataRocksDBStore extends MetadataStoreBase { + + HgKVStore store; + + PDConfig pdConfig; + + public MetadataRocksDBStore(PDConfig pdConfig) { + store = MetadataFactory.getStore(pdConfig); + this.pdConfig = pdConfig; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + public HgKVStore getStore() { +======== + public HgKVStore getStore(){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + if (store == null) { + store = MetadataFactory.getStore(pdConfig); + } + return store; + } + + @Override + public byte[] getOne(byte[] key) throws PDException { + try { + byte[] bytes = store.get(key); + return bytes; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + }catch (Exception 
e){ + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + } + } + + @Override + public E getOne(Parser parser, byte[] key) throws PDException { + try { + byte[] bytes = store.get(key); + if (ArrayUtils.isEmpty(bytes)) { + return null; + } + return parser.parseFrom(bytes); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + }catch (Exception e){ + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + } + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + try { + getStore().put(key, value); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); +======== + } catch (Exception e){ + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.store.putWithTTL(key, value, ttl); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { + this.store.putWithTTL(key, value, ttl, timeUnit); + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + return this.store.getWithTTL(key); + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + return this.store.getListWithTTL(key); + } + + @Override + public void removeWithTTL(byte[] key) throws 
PDException { + this.store.removeWithTTL(key); + } + + @Override + public List scanPrefix(byte[] prefix) throws PDException { + try { + return this.store.scanPrefix(prefix); + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + } + + @Override + public List scanRange(byte[] start, byte[] end) throws PDException { + return this.store.scanRange(start, end); + } + + @Override + public List scanRange(Parser parser, byte[] start, byte[] end) throws PDException { + List stores = new LinkedList<>(); + try { + List kvs = this.scanRange(start, end); + for (KV keyValue : kvs) { + stores.add(parser.parseFrom(keyValue.getValue())); + } + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + return stores; + } + + @Override + public List scanPrefix(Parser parser, byte[] prefix) throws PDException { + List stores = new LinkedList<>(); + try { + List kvs = this.scanPrefix(prefix); + for (KV keyValue : kvs) { + stores.add(parser.parseFrom(keyValue.getValue())); + } + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } + return stores; + } + + @Override + public boolean containsKey(byte[] key) throws PDException { + return !ArrayUtils.isEmpty(store.get(key)); + } + + @Override + public long remove(byte[] key) throws PDException { + try { + return this.store.remove(key); + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public long removeByPrefix(byte[] prefix) throws PDException { + try { + return this.store.removeByPrefix(prefix); + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public void clearAllCache() throws PDException { + this.store.clear(); + } + + @Override + public void close() { + + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java new file mode 100644 index 0000000000..7f4f61cdf5 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java @@ -0,0 +1,139 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +======== +package org.apache.hugegraph.pd.meta; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java + +package org.apache.hugegraph.pd.meta; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +======== +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.store.KV; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.store.KV; + +import com.google.protobuf.Parser; + +public abstract class MetadataStoreBase { + + // public long timeout = 3; + + public abstract byte[] getOne(byte[] key) throws PDException; + + public abstract E getOne(Parser parser, byte[] key) throws PDException; + + public abstract void put(byte[] key, byte[] value) throws PDException; + + /** + * A put with an expiration time + */ + public abstract void putWithTTL(byte[] key, + byte[] value, + long ttl) throws PDException; + + public abstract void putWithTTL(byte[] key, + byte[] value, + long ttl, TimeUnit timeUnit) throws PDException; + + public abstract byte[] getWithTTL(byte[] key) throws PDException; + + public abstract List getListWithTTL(byte[] key) throws PDException; + + public abstract void removeWithTTL(byte[] key) throws PDException; + + /** + * Prefix queries + * + * @param prefix + * @return + * @throws PDException + */ + public abstract List scanPrefix(byte[] prefix) throws PDException; + + /** + * Prefix queries + * + * @param prefix + * @return + * @throws PDException + */ + public abstract List scanPrefix(Parser parser, 
byte[] prefix) throws PDException; + + public abstract List scanRange(byte[] start, byte[] end) throws PDException; + + public abstract List scanRange(Parser parser, byte[] start, byte[] end) throws + PDException; + + /** + * Check if the key exists + * + * @param key + * @return + * @throws PDException + */ + public abstract boolean containsKey(byte[] key) throws PDException; + + public abstract long remove(byte[] key) throws PDException; + + public abstract long removeByPrefix(byte[] prefix) throws PDException; + + public abstract void clearAllCache() throws PDException; + + public abstract void close() throws IOException; + + public T getInstanceWithTTL(Parser parser, byte[] key) throws PDException { + try { + byte[] withTTL = this.getWithTTL(key); + return parser.parseFrom(withTTL); + } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE,e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java + } + } + + public List getInstanceListWithTTL(Parser parser, byte[] key) + throws PDException { + try { + List withTTL = this.getListWithTTL(key); + LinkedList ts = new LinkedList<>(); + for (int i = 0; i < withTTL.size(); i++) { + ts.add(parser.parseFrom((byte[]) withTTL.get(i))); + } + return ts; + } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java + throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE,e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java new file mode 100644 index 0000000000..b83e48207c --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java @@ -0,0 +1,416 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +package org.apache.hugegraph.pd.meta; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.commons.collections4.CollectionUtils; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.common.PartitionCache; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionCache; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +/** + * Partition information management + */ +@Slf4j +public class PartitionMeta extends MetadataRocksDBStore { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + + static String CID_GRAPH_ID_KEY = "GraphID"; + static int CID_GRAPH_ID_MAX = 0xFFFE; + private final PartitionCache cache; + + public PartitionMeta(PDConfig pdConfig) { + super(pdConfig); + //this.timeout = pdConfig.getEtcd().getTimeout(); +======== + + public static final String CID_GRAPH_ID_KEY = "GraphID"; + public static final int CID_GRAPH_ID_MAX = 0xFFFE; + private PDConfig pdConfig; + private PartitionCache cache; + + public PartitionMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + // this.timeout = pdConfig.getEtcd().getTimeout(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + this.cache = new PartitionCache(); + } + + /** + * Initialize, load all partitions + */ + public void init() throws PDException { + loadShardGroups(); + loadGraphs(); + } + + public void 
reload() throws PDException { + cache.clear(); + loadShardGroups(); + loadGraphs(); + } + + private void loadGraphs() throws PDException { + byte[] key = MetadataKeyHelper.getGraphPrefix(); + List graphs = scanPrefix(Metapb.Graph.parser(), key); + for (Metapb.Graph graph : graphs) { + cache.updateGraph(graph); + loadPartitions(graph); + } + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + /** + * The partition and shard group are stored separately, and when they are init, they need to + * be loaded + * + * @throws PDException + */ +======== + public void loadGraph(String graphName) throws PDException { + Metapb.Graph graph = getGraph(graphName); + if (graph != null) { + cache.updateGraph(graph); + loadPartitions(graph); + } + } + + /** + * partition 和 shard group分开存储,再init的时候,需要加载进来 + * + * @throws PDException + */ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + private void loadShardGroups() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + for (var shardGroup : scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix)) { + cache.updateShardGroup(shardGroup); + } + } + + private void loadPartitions(Metapb.Graph graph) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName()); + List partitions = scanPrefix(Metapb.Partition.parser(), prefix); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + partitions.forEach(p -> { + cache.updatePartition(p); + }); + } + + /** + * Find partitions by ID (first from the cache, then from the database) +======== + partitions.forEach(p -> cache.updatePartition(p)); + } + + /** + * 根据id查找分区 (先从缓存找,再到数据库中找) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + * + * @param graphName + * @param partId + * @return + * 
@throws PDException + */ + public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException { + var pair = cache.getPartitionById(graphName, partId); + Metapb.Partition partition; + if (pair == null) { + byte[] key = MetadataKeyHelper.getPartitionKey(graphName, partId); + partition = getOne(Metapb.Partition.parser(), key); + if (partition != null) { + cache.updatePartition(partition); + } + } else { + partition = pair.getKey(); + } + return partition; + } + + public List getPartitionById(int partId) throws PDException { + List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + cache.getGraphs().forEach(graph -> { + cache.getPartitions(graph.getGraphName()).forEach(partition -> { + if (partition.getId() == partId) { + partitions.add(partition); + } + }); + }); +======== + cache.getGraphs().forEach(graph -> cache.getPartitions(graph.getGraphName()).forEach(partition -> { + if (partition.getId() == partId) { + partitions.add(partition); + } + })); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + return partitions; + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + * Find partitions based on code +======== + * 根据code查找分区 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + */ + public Metapb.Partition getPartitionByCode(String graphName, long code) throws PDException { + var pair = cache.getPartitionByCode(graphName, code); + if (pair != null) { + return pair.getKey(); + } + return null; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + public Metapb.Graph getAndCreateGraph(String graphName) throws PDException { + return getAndCreateGraph(graphName, pdConfig.getPartition().getTotalCount()); + } + + public 
Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) throws PDException { + + if (partitionCount > pdConfig.getPartition().getTotalCount()) { + partitionCount = pdConfig.getPartition().getTotalCount(); + } + + if (graphName.endsWith("/s") || graphName.endsWith("/m")) { + partitionCount = 1; + } + + Metapb.Graph graph = cache.getGraph(graphName); + if (graph == null) { + graph = Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + updateGraph(graph); + } + return graph; + } + + /** + * Save the partition information +======== + public Metapb.Graph createGraph(String graphName, int partitionCount, int groupId) throws PDException { + return updateGraph(Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setStoreGroupId(groupId) + .setState(Metapb.PartitionState.PState_Normal) + .build()); + } + + /** + * 保存分区信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + * + * @param partition + * @return + * @throws PDException + */ + public Metapb.Partition updatePartition(Metapb.Partition partition) throws PDException { + if (!cache.hasGraph(partition.getGraphName())) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + getAndCreateGraph(partition.getGraphName()); + } +======== + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "Graph " + partition.getGraphName() + " not exist"); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + byte[] key = MetadataKeyHelper.getPartitionKey(partition.getGraphName(), partition.getId()); + put(key, partition.toByteString().toByteArray()); + cache.updatePartition(partition); + return partition; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + 
public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDException { + if (!cache.hasGraph(partition.getGraphName())) { + getAndCreateGraph(partition.getGraphName()); + } + + Metapb.Partition pt = getPartitionById(partition.getGraphName(), partition.getId()); + // pt = pt.toBuilder().setVersion(partition.getVersion()) + // .setConfVer(partition.getConfVer()) + // .clearShards() + // .addAllShards(partition.getShardsList()).build(); + + byte[] key = MetadataKeyHelper.getPartitionKey(pt.getGraphName(), pt.getId()); + put(key, pt.toByteString().toByteArray()); + cache.updatePartition(pt); + return partition; + } + + /** + * Delete all partitions +======== + /** + * 删除所有分区 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + */ + public long removeAllPartitions(String graphName) throws PDException { + cache.removeAll(graphName); + byte[] prefix = MetadataKeyHelper.getPartitionPrefix(graphName); + return removeByPrefix(prefix); + } + + public long removePartition(String graphName, int id) throws PDException { + cache.remove(graphName, id); + byte[] key = MetadataKeyHelper.getPartitionKey(graphName, id); + return remove(key); + } + + public void updatePartitionStats(Metapb.PartitionStats stats) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + for (String graphName : stats.getGraphNameList()) { + byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, stats.getId()); + put(prefix, stats.toByteArray()); + } +======== + // for (String graphName : stats.getGraphNameList()) { + byte[] prefix = MetadataKeyHelper.getPartitionStatusKey("", stats.getId()); + put(prefix, stats.toByteArray()); + // } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + } + + /** + * Get the partition status + */ + public Metapb.PartitionStats getPartitionStats(String 
graphName, int id) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, id); + return getOne(Metapb.PartitionStats.parser(), prefix); + } + + /** + * Get the partition status + */ + public List getPartitionStats(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionStatusPrefixKey(graphName); + return scanPrefix(Metapb.PartitionStats.parser(), prefix); + } + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + * Update the diagram information +======== + * 更新图信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + * + * @param graph + * @return + */ + public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + log.info("updateGraph {}", graph); + byte[] key = MetadataKeyHelper.getGraphKey(graph.getGraphName()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +======== + // 保存图信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + put(key, graph.toByteString().toByteArray()); + cache.updateGraph(graph); + return graph; + } + + public List getPartitions() { + List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + List graphs = cache.getGraphs(); + graphs.forEach(e -> { + partitions.addAll(cache.getPartitions(e.getGraphName())); + }); +======== + try { + List graphs = cache.getGraphs(); + if (CollectionUtils.isEmpty(graphs)) { + loadGraphs(); + graphs = cache.getGraphs(); + } + graphs.forEach(e -> partitions.addAll(cache.getPartitions(e.getGraphName()))); + } catch (PDException e) { + throw new PDRuntimeException(e.getErrorCode(), e); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + return partitions; + 
} + + public List getPartitions(String graphName) { + return cache.getPartitions(graphName); + } + + public List getGraphs() throws PDException { + byte[] key = MetadataKeyHelper.getGraphPrefix(); + return scanPrefix(Metapb.Graph.parser(), key); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getGraphKey(graphName); + return getOne(Metapb.Graph.parser(), key); + } + + /** + * Delete the diagram and delete the diagram ID + */ + public long removeGraph(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getGraphKey(graphName); + long l = remove(key); + return l; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +======== + public long removePartitionStats(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionStatusPrefixKey(graphName); + return removeByPrefix(prefix); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java + public PartitionCache getPartitionCache() { + return cache; + } + + public void updateShardGroupCache(Metapb.ShardGroup group) { + cache.updateShardGroup(group); + } + + public Map getShardGroupCache() { + return cache.getShardGroups(); + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PulseStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PulseStore.java new file mode 100644 index 0000000000..10f5b90906 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PulseStore.java @@ -0,0 +1,68 @@ +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +import java.util.List; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + */ +public class PulseStore extends MetadataRocksDBStore 
{ + PulseStore(PDConfig pdConfig) { + super(pdConfig); + } + + public void addItem(Metapb.QueueItem queueItem) throws PDException { + HgAssert.isArgumentNotNull(queueItem, "queueItem"); + byte[] key = MetadataKeyHelper.getQueueItemKey(queueItem.getItemId()); + put(key, queueItem.toByteString().toByteArray()); + } + + public void removeItem(String itemId) throws PDException { + remove(MetadataKeyHelper.getQueueItemKey(itemId)); + } + + public List getQueue() throws PDException { + byte[] prefix = MetadataKeyHelper.getQueueItemPrefix(); + return scanPrefix(Metapb.QueueItem.parser(), prefix); + } + + /***************************************************************** + * The following methods are for the retying notice dispatcher * + ****************************************************************/ + + public void addNotice(Metapb.NoticeContent noticeContent) throws PDException { + HgAssert.isArgumentNotNull(noticeContent, "noticeContent"); + byte[] key = MetadataKeyHelper.getNoticeContentKey(noticeContent.getNoticeId()); + put(key, noticeContent.toByteString().toByteArray()); + } + + public Metapb.NoticeContent getNotice(long noticeId) throws PDException { + byte[] key = MetadataKeyHelper.getNoticeContentKey(noticeId); + return getOne(Metapb.NoticeContent.parser(), key); + } + + public void addObserverNotice(Metapb.ObserverNotice observerNotice) throws PDException { + HgAssert.isArgumentNotNull(observerNotice, "observerNotice"); + byte[] key = MetadataKeyHelper.getObserverNoticeKey(observerNotice.getObserverId(), + observerNotice.getNoticeId()); + put(key, observerNotice.toByteString().toByteArray()); + } + + public List getObserverNotices() throws PDException { + byte[] prefix = MetadataKeyHelper.getObserverNoticePrefix(); + return scanPrefix(Metapb.ObserverNotice.parser(), prefix); + } + + public void removeObserverNotice(long observerId, long noticeId) throws PDException { + remove(MetadataKeyHelper.getObserverNoticeKey(observerId, noticeId)); + } + + public void 
removeNoticeContent(long noticeId) throws PDException { + remove(MetadataKeyHelper.getNoticeContentKey(noticeId)); + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java new file mode 100644 index 0000000000..2c9e38161f --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java @@ -0,0 +1,272 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + +import java.util.LinkedList; +import java.util.List; +import java.util.ListIterator; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +/** + * Store information storage + */ +@Slf4j +public class StoreInfoMeta extends MetadataRocksDBStore { + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +======== + private PDConfig pdConfig; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + + public StoreInfoMeta(PDConfig pdConfig) { + super(pdConfig); + // this.timeout = pdConfig.getDiscovery().getHeartbeatOutTimes(); + } + + public static boolean shardGroupEquals(List g1, List g2) { + ListIterator e1 = g1.listIterator(); + ListIterator e2 = g2.listIterator(); + while (e1.hasNext() && e2.hasNext()) { + Metapb.Shard o1 = e1.next(); + Metapb.Shard o2 = e2.next(); + if (!(o1 == null ? 
o2 == null : o1.getStoreId() == o2.getStoreId())) { + return false; + } + } + return !(e1.hasNext() || e2.hasNext()); + } + + /** + * Update the Store information + * + * @param store + * @throws PDException + */ + public void updateStore(Metapb.Store store) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(store.getId()); + put(storeInfoKey, store.toByteArray()); + } + + /** + * Update the survivability status of the store + * + * @param store + */ + public void keepStoreAlive(Metapb.Store store) throws PDException { + byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); + putWithTTL(activeStoreKey, store.toByteArray(), pdConfig.getStore().getKeepAliveTimeout()); + } + + public void removeActiveStore(Metapb.Store store) throws PDException { + byte[] activeStoreKey = MetadataKeyHelper.getActiveStoreKey(store.getId()); + removeWithTTL(activeStoreKey); + } + + public Metapb.Store getStore(Long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + Metapb.Store store = getOne(Metapb.Store.parser(), storeInfoKey); + return store; + } + + /** + * Get all the stores + * + * @param graphName + * @return + * @throws PDException + */ + @Deprecated + public List getStores(String graphName) throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return scanPrefix(Metapb.Store.parser(), storePrefix); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +======== + } + + public List getAllStores() throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return scanPrefix(Metapb.Store.parser(), storePrefix); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + } + + /** + * Get an active store + * + * @return + * @throws PDException + */ +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + public List getActiveStores(String graphName) throws PDException { + byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); + List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), + activePrefix); + return listWithTTL; +======== + public List getActiveStores(int storeGroupId) throws PDException { + Set storeIds = getStoreIdsByGroup(storeGroupId); + return getActiveStores().stream() + .filter(store -> storeIds.contains(store.getId())) + .collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + } + + public List getActiveStores() throws PDException { + byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); + List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), + activePrefix); + return listWithTTL; + } + + /** + * Check whether the storeID exists + * + * @param storeId + * @return + */ + public boolean storeExists(Long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + return containsKey(storeInfoKey); + } + + /** + * Update the storage status information + * + * @param storeStats + */ + public Metapb.StoreStats updateStoreStats(Metapb.StoreStats storeStats) throws PDException { + byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeStats.getStoreId()); + + put(storeStatusKey, storeStats.toByteArray()); + return storeStats; + } + + public long removeStore(long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreInfoKey(storeId); + return remove(storeInfoKey); + } + + public long removeAll() throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return this.removeByPrefix(storePrefix); + } + + public void updateShardGroup(Metapb.ShardGroup group) throws PDException { + byte[] shardGroupKey = 
MetadataKeyHelper.getShardGroupKey(group.getId()); + put(shardGroupKey, group.toByteArray()); + } + + public void deleteShardGroup(int groupId) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); + remove(shardGroupKey); + } + + public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { + byte[] shardGroupKey = MetadataKeyHelper.getShardGroupKey(groupId); + return getOne(Metapb.ShardGroup.parser(), shardGroupKey); + } + + public int getShardGroupCount() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + return scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix).size(); + } + + public List getShardGroups() throws PDException { + byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); + return scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix); + } + + public Metapb.StoreStats getStoreStats(long storeId) throws PDException { + byte[] storeStatusKey = MetadataKeyHelper.getStoreStatusKey(storeId); + Metapb.StoreStats stats = getOne(Metapb.StoreStats.parser(), + storeStatusKey); + return stats; + } + + /** + * @return store and status information + * @throws PDException + */ + public List getStoreStatus(boolean isActive) throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + List stores = isActive ? getActiveStores() : + scanPrefix(Metapb.Store.parser(), storePrefix); +======== + List stores =isActive ? 
getActiveStores() : + scanPrefix(Metapb.Store.parser(), storePrefix); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + LinkedList list = new LinkedList<>(); + for (int i = 0; i < stores.size(); i++) { + Metapb.Store store = stores.get(i); + Metapb.StoreStats stats = getStoreStats(store.getId()); + if (stats != null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) + .build(); +======== + store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java + } + list.add(store); + } + return list; + } + + public void updateStoreGroup(Long storeId, int storeGroupId) throws PDException { + byte[] groupKey = MetadataKeyHelper.getStoreGroupRelationKey(storeId); + put(groupKey, String.valueOf(storeGroupId).getBytes()); + } + + public Set getStoreIdsByGroup(int storeGroupId) throws PDException { + byte[] groupKey = MetadataKeyHelper.getStoreGroupRelationPrefix(); + return scanPrefix(groupKey).stream() + .filter(g -> Objects.equals(storeGroupId, Integer.parseInt(new String(g.getValue())))) + .map(g -> Long.parseLong(new String(g.getKey()).split("/")[1])) + .collect(Collectors.toSet()); + } + + public int getStoreGroupByStoreId(long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreGroupRelationKey(storeId); + byte[] value = getOne(storeInfoKey); + return value == null ? 
DEFAULT_STORE_GROUP_ID : Integer.parseInt(new String(value)); + } + + public boolean isStoreHasGroup(long storeId) throws PDException { + byte[] groupKey = MetadataKeyHelper.getStoreGroupRelationKey(storeId); + return getOne(groupKey) != null; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java new file mode 100644 index 0000000000..ef346c5a2e --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java @@ -0,0 +1,159 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java + +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; + +/** + * Task management + */ +public class TaskInfoMeta extends MetadataRocksDBStore { + + public TaskInfoMeta(PDConfig pdConfig) { + super(pdConfig); + } + + /** + * Add a partition splitting task + */ + public void addSplitTask(int groupID, Metapb.Partition partition, SplitPartition splitPartition) + throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), groupID); + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Split_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setSplitPartition(splitPartition) + .build(); + put(key, task.toByteString().toByteArray()); + } + + public void updateSplitTask(MetaTask.Task task) throws PDException { + var partition = task.getPartition(); + byte[] key = MetadataKeyHelper.getSplitTaskKey(partition.getGraphName(), partition.getId()); + put(key, task.toByteString().toByteArray()); + } + + public MetaTask.Task getSplitTask(String graphName, int groupID) throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskKey(graphName, 
groupID); + return getOne(MetaTask.Task.parser(), key); + } + + public List scanSplitTask(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getSplitTaskPrefix(graphName); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + + public void removeSplitTaskPrefix(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getSplitTaskPrefix(graphName); + removeByPrefix(key); + } + + public boolean hasSplitTaskDoing() throws PDException { + byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); + return !scanPrefix(key).isEmpty(); + } + + public void addMovePartitionTask(Metapb.Partition partition, MovePartition movePartition) + throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskKey(partition.getGraphName(), + movePartition.getTargetPartition().getId(), + partition.getId()); + + MetaTask.Task task = MetaTask.Task.newBuilder() + .setType(MetaTask.TaskType.Move_Partition) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setMovePartition(movePartition) + .build(); + put(key, task.toByteArray()); + } + + public void updateMovePartitionTask(MetaTask.Task task) + throws PDException { + + byte[] key = MetadataKeyHelper.getMoveTaskKey(task.getPartition().getGraphName(), + task.getMovePartition().getTargetPartition() + .getId(), + task.getPartition().getId()); + put(key, task.toByteArray()); + } + + public MetaTask.Task getMovePartitionTask(String graphName, int targetId, int partId) throws + PDException { + byte[] key = MetadataKeyHelper.getMoveTaskKey(graphName, targetId, partId); + return getOne(MetaTask.Task.parser(), key); + } + + public List scanMoveTask(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getMoveTaskPrefix(graphName); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + + public List scanUserTask(long taskId) throws PDException { + byte[] prefix = MetadataKeyHelper.getUserTaskPrefix(taskId); + 
return scanPrefix(MetaTask.Task.parser(), prefix); + } + +// public MetaTask.Task getBuildIndexTask(long taskId, int partitionId) throws PDException { +// byte[] key = MetadataKeyHelper.getUserTaskKey(taskId, partitionId); +// return getOne(MetaTask.Task.parser(), key); +// } + + public void updateUserTask(MetaTask.Task task) throws PDException { + byte[] key = MetadataKeyHelper.getUserTaskKey(task.getId(), task.getPartition().getId()); + put(key, task.toByteArray()); + } + + /** + * Delete the migration task by prefixing it and group them all at once + * + * @param graphName graphName + * @throws PDException io error + */ + public void removeMoveTaskPrefix(String graphName) throws PDException { + byte[] key = MetadataKeyHelper.getMoveTaskPrefix(graphName); + removeByPrefix(key); + } + + public boolean hasMoveTaskDoing() throws PDException { + byte[] key = MetadataKeyHelper.getAllMoveTaskPrefix(); + return !scanPrefix(key).isEmpty(); + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java new file mode 100644 index 0000000000..5df461cafa --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/FutureClosureAdapter.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.Closure; +import com.alipay.sofa.jraft.Status; + +import java.util.concurrent.CompletableFuture; + +public class FutureClosureAdapter implements Closure { + public final CompletableFuture future = new CompletableFuture<>(); + private T resp; + + public T getResponse() { return this.resp; } + + public void setResponse(T resp) { + this.resp = resp; + future.complete(resp); + run(Status.OK()); + } + + public void failure(Throwable t){ + future.completeExceptionally(t); + run(new Status(-1, t.getMessage())); + } + + @Override + public void run(Status status) { + + } +} \ No newline at end of file diff --git 
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java new file mode 100644 index 0000000000..ca752e7a65 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java @@ -0,0 +1,176 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.raft; +======== +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.util.BytesUtil; +import com.alipay.sofa.jraft.util.Requires; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; +import lombok.Data; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import com.alipay.sofa.jraft.util.BytesUtil; +import com.alipay.sofa.jraft.util.Requires; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; + +import lombok.Data; + +import org.apache.hugegraph.pd.raft.serializer.HugegraphHessianSerializerFactory; + +@Data +public class KVOperation { + + /** + * Put operation + */ + public static final byte PUT = 0x01; + /** + * Get operation + */ + public static final byte GET = 0x02; + public static final byte DEL = 0x03; + public static final byte REMOVE_BY_PREFIX = 0x04; + public static final byte REMOVE = 0x05; + public static final byte PUT_WITH_TTL = 0x06; + public static final byte CLEAR = 0x07; + public static final byte PUT_WITH_TTL_UNIT = 0x08; + public static final byte REMOVE_WITH_TTL = 0x09; + /** + * Snapshot operation + */ + public static final byte SAVE_SNAPSHOT = 0x10; + public static final byte LOAD_SNAPSHOT = 0x11; + + private byte[] key; + private byte[] value; + // Raw object, used for native processing, reducing the number of deserialization + // operations + private Object attach; + private Object arg; + private byte op; + + public KVOperation() { + + } + + public KVOperation(byte[] key, byte[] value, Object attach, byte op) { + this.key = key; + this.value = value; + this.attach = attach; + this.op = op; + } + + public KVOperation(byte[] key, byte[] value, Object attach, byte op, Object arg) { + this.key = key; + 
this.value = value; + this.attach = attach; + this.op = op; + this.arg = arg; + } + + public static KVOperation fromByteArray(byte[] value) throws IOException { + + try (ByteArrayInputStream bis = new ByteArrayInputStream(value, 1, value.length - 1)) { + Hessian2Input input = new Hessian2Input(bis); + input.setSerializerFactory(HugegraphHessianSerializerFactory.getInstance()); + KVOperation op = new KVOperation(); + op.op = value[0]; + op.key = input.readBytes(); + op.value = input.readBytes(); + op.arg = input.readObject(); + input.close(); + return op; + } + } + + public static KVOperation createPut(final byte[] key, final byte[] value) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, null, PUT); + } + + public static KVOperation createGet(final byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, BytesUtil.EMPTY_BYTES, null, GET); + } + + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, value, PUT_WITH_TTL, + ttl); + } + + public static KVOperation createPutWithTTL(byte[] key, byte[] value, long ttl, + TimeUnit timeUnit) { + Requires.requireNonNull(key, "key"); + Requires.requireNonNull(value, "value"); + return new KVOperation(key, value, value, PUT_WITH_TTL_UNIT, + new Object[]{ttl, timeUnit}); + } + + public static KVOperation createRemoveWithTTL(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE_WITH_TTL); + } + + public static KVOperation createRemoveByPrefix(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE_BY_PREFIX); + } + + public static KVOperation createRemove(byte[] key) { + Requires.requireNonNull(key, "key"); + return new KVOperation(key, key, null, REMOVE); + } + + public static KVOperation 
createClear() { + return new KVOperation(null, null, null, CLEAR); + } + + public static KVOperation createSaveSnapshot(String snapshotPath) { + Requires.requireNonNull(snapshotPath, "snapshotPath"); + return new KVOperation(null, null, snapshotPath, SAVE_SNAPSHOT); + } + + public static KVOperation createLoadSnapshot(String snapshotPath) { + Requires.requireNonNull(snapshotPath, "snapshotPath"); + return new KVOperation(null, null, snapshotPath, LOAD_SNAPSHOT); + } + + public byte[] toByteArray() throws IOException { + try (ByteArrayOutputStream bos = new ByteArrayOutputStream()) { + bos.write(op); + Hessian2Output output = new Hessian2Output(bos); + output.writeObject(key); + output.writeObject(value); + output.writeObject(arg); + output.flush(); + return bos.toByteArray(); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java new file mode 100644 index 0000000000..00b374c903 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVStoreClosure.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.Closure; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.Errors; + +public interface KVStoreClosure extends Closure { + + Errors getError(); + + void setError(final Errors error); + + Object getData(); + + void setData(final Object data); +} \ No newline at end of file diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/PeerUtil.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/PeerUtil.java new file mode 100644 index 0000000000..573e692c18 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/PeerUtil.java @@ -0,0 +1,47 @@ +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.entity.PeerId; +import org.apache.hugegraph.pd.common.KVPair; + +import 
java.util.LinkedList; +import java.util.List; +import java.util.Objects; + +public class PeerUtil { + /** + * 只比较 ip 和 port + * @param p1 + * @param p2 + * @return + */ + public static boolean isPeerEquals(PeerId p1, PeerId p2) { + if (p1 == null && p2 == null) { + return true; + } + if (p1 == null || p2 == null) { + return false; + } + return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); + } + + public static List> parseConfig(String conf) { + List> result = new LinkedList<>(); + + if (conf != null && conf.length() > 0) { + for (var s : conf.split(",")) { + if (s.endsWith("/leader")) { + result.add(new KVPair<>("leader", JRaftUtils.getPeerId(s.substring(0, s.length() - 7)))); + } else if (s.endsWith("/learner")) { + result.add(new KVPair<>("learner", JRaftUtils.getPeerId(s.substring(0, s.length() - 8)))); + } else if (s.endsWith("/follower")) { + result.add(new KVPair<>("follower", JRaftUtils.getPeerId(s.substring(0, s.length() - 9)))); + } else { + result.add(new KVPair<>("follower", JRaftUtils.getPeerId(s))); + } + } + } + + return result; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java new file mode 100644 index 0000000000..cb545b0048 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftEngine.java @@ -0,0 +1,394 @@ +package org.apache.hugegraph.pd.raft; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.commons.lang3.StringUtils; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Node; +import 
com.alipay.sofa.jraft.RaftGroupService; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.option.NodeOptions; +import com.alipay.sofa.jraft.option.RaftOptions; +import com.alipay.sofa.jraft.option.RpcOptions; +import com.alipay.sofa.jraft.rpc.RaftRpcServerFactory; +import com.alipay.sofa.jraft.rpc.RpcServer; +import com.alipay.sofa.jraft.util.Endpoint; +import com.alipay.sofa.jraft.util.internal.ThrowUtil; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.raft.RaftRpcProcessor.GetMemberResponse; + +import lombok.extern.slf4j.Slf4j; +import org.apache.hugegraph.pd.config.PDConfig.Raft; + +@Slf4j +public class RaftEngine { + private volatile static RaftEngine instance = new RaftEngine(); + + public static RaftEngine getInstance() { + return instance; + } + + private String groupId = "pd_raft"; + private Raft config; + private RaftStateMachine stateMachine; + private RaftGroupService raftGroupService; + private RpcServer rpcServer; + private Node raftNode; + private RaftRpcClient raftRpcClient; + private static ConcurrentMap grpcAddresses = new ConcurrentHashMap(); + + public RaftEngine(){ + this.stateMachine = new RaftStateMachine(); + } + + public synchronized boolean init(Raft config) { + if (this.raftNode != null) return false; + this.config = config; + + raftRpcClient = new RaftRpcClient(); + raftRpcClient.init(new RpcOptions()); + + String raftPath = config.getDataPath() + "/" + groupId; + new File(raftPath).mkdirs(); + + new File(config.getDataPath()).mkdirs(); + Configuration initConf = new Configuration(); + initConf.parse(config.getPeersList()); + if (config.isEnable() && 
config.getPeersList().length() < 3) { + log.error( + "The RaftEngine parameter is incorrect." + + " When RAFT is enabled, the number of peers " + + "cannot be less than 3"); + } + // 设置Node参数,包括日志存储路径和状态机实例 + NodeOptions nodeOptions = new NodeOptions(); + nodeOptions.setFsm(stateMachine); + nodeOptions.setEnableMetrics(true); + // 日志路径 + nodeOptions.setLogUri(raftPath + "/log"); + // raft元数据路径 + nodeOptions.setRaftMetaUri(raftPath + "/meta"); + // 快照路径 + nodeOptions.setSnapshotUri(raftPath + "/snapshot"); + // 初始集群 + nodeOptions.setInitialConf(initConf); + // 快照时间间隔 + nodeOptions.setSnapshotIntervalSecs(config.getSnapshotInterval()); + + nodeOptions.setRpcConnectTimeoutMs(config.getRpcTimeout()); + nodeOptions.setRpcDefaultTimeout(config.getRpcTimeout()); + nodeOptions.setRpcInstallSnapshotTimeout(config.getRpcTimeout()); + // 设置raft配置 + RaftOptions raftOptions = nodeOptions.getRaftOptions(); + + nodeOptions.setEnableMetrics(true); + + final PeerId serverId = JRaftUtils.getPeerId(config.getAddress()); + + rpcServer = createRaftRpcServer(config.getAddress()); + // 构建raft组并启动raft + this.raftGroupService = new RaftGroupService(groupId, serverId, + nodeOptions, rpcServer, true); + this.raftNode = raftGroupService.start(false); + log.info("RaftEngine start successfully: id = {}, peers list = {}", groupId, nodeOptions.getInitialConf().getPeers()); + return this.raftNode != null; + } + + /** + * 创建raft rpc server,用于pd之间通讯 + */ + private RpcServer createRaftRpcServer(String raftAddr) { + Endpoint endpoint = JRaftUtils.getEndPoint(raftAddr); + RpcServer rpcServer = RaftRpcServerFactory.createRaftRpcServer(endpoint); + RaftRpcProcessor.registerProcessor(rpcServer, this); + rpcServer.init(null); + return rpcServer; + } + + public void shutDown() { + if (this.raftGroupService != null) { + this.raftGroupService.shutdown(); + try { + this.raftGroupService.join(); + } catch (final InterruptedException e) { + this.raftNode = null; + ThrowUtil.throwException(e); + } + 
this.raftGroupService = null; + } + if (this.rpcServer != null){ + this.rpcServer.shutdown(); + this.rpcServer = null; + } + if (this.raftNode != null) { + this.raftNode.shutdown(); + } + this.raftNode = null; + } + + public boolean isLeader() { + return this.raftNode.isLeader(true); + } + + /** + * 添加Raft任务,grpc通过该接口给raft发送数据 + */ + public void addTask(Task task) { + if (!isLeader()) { + KVStoreClosure closure = (KVStoreClosure) task.getDone(); + closure.setError(Errors.newBuilder().setType(ErrorType.NOT_LEADER).build()); + closure.run(new Status(RaftError.EPERM, "Not leader")); + return; + } + this.raftNode.apply(task); + } + + public void addStateListener(RaftStateListener listener){ + this.stateMachine.addStateListener(listener); + } + + public void addTaskHandler(RaftTaskHandler handler){ + this.stateMachine.addTaskHandler(handler); + } + public Raft getConfig() { + return this.config; + } + + public PeerId getLeader(){ + return raftNode.getLeaderId(); + } + + /** + * 向leader发消息,获取grpc地址; + */ + public String getLeaderGrpcAddress() throws PDException { + return getLeaderGrpcAddress(true); + } + + /** + * 获取leader grpc地址; + */ + public String getLeaderGrpcAddress(boolean blocking) throws PDException { + try{ + if (isLeader()) { + return config.getGrpcAddress(); + } + PeerId leaderId = raftNode.getLeaderId(); + if (leaderId == null) { + if (blocking) { + leaderId = waitingForLeader(10000); + if (leaderId == null) { + return ""; + } + } else { + return ""; + } + } + String raftAddress = leaderId.getEndpoint().toString(); + String grpcAddress = grpcAddresses.get(raftAddress); + if (!StringUtils.isEmpty(grpcAddress)) { + return grpcAddress; + } + grpcAddress = raftRpcClient.getGrpcAddress(raftAddress).get().getGrpcAddress(); + grpcAddresses.put(raftAddress, grpcAddress); + return grpcAddress; + } catch (Exception e) { + throw new PDException(ErrorType.ERROR, e); + } + } + + /** + * 清空 gRPC 地址列表,用于极端情况下修改了Grpc地址而不重启的场景 + */ + public void clearGrpcAddresses() { + 
grpcAddresses.clear(); + } + + /** + * 获取本地成员信息 + * + * @return 本地成员信息对象 {@link Metapb.Member} 的构建器 + */ + public Metapb.Member getLocalMember(){ + Metapb.Member.Builder builder = Metapb.Member.newBuilder(); + builder.setClusterId(config.getClusterId()); + builder.setRaftUrl(config.getAddress()); + builder.setDataPath(config.getDataPath()); + builder.setGrpcUrl(config.getGrpcAddress()); + builder.setRestUrl(config.getHost() + ":" + config.getPort()); + builder.setState(Metapb.StoreState.Up); + return builder.build(); + } + + public List getMembers() throws ExecutionException, InterruptedException { + List members = new ArrayList<>(); + + List peers = raftNode.listPeers(); + peers.addAll(raftNode.listLearners()); + var learners = new HashSet<>(raftNode.listLearners()); + + for(PeerId peerId : peers){ + Metapb.Member.Builder builder = Metapb.Member.newBuilder(); + builder.setClusterId(config.getClusterId()); + CompletableFuture future = + raftRpcClient.getGrpcAddress(peerId.getEndpoint().toString()); + + Metapb.ShardRole role = Metapb.ShardRole.Follower; + if (PeerUtil.isPeerEquals(peerId, raftNode.getLeaderId())) { + role = Metapb.ShardRole.Leader; + } else if (learners.contains(peerId)) { + role = Metapb.ShardRole.Learner; + var state = raftNode.getReplicatorState(peerId); + if (state != null) { + builder.setReplicatorState(state.name()); + } + } + + builder.setRole(role); + + try { + if (future.isCompletedExceptionally()) { + log.error("failed to getGrpcAddress of {}", + peerId.getEndpoint().toString()); + builder.setState(Metapb.StoreState.Offline); + builder.setRaftUrl(peerId.getEndpoint().toString()); + members.add(builder.build()); + } else { + GetMemberResponse response = future.get(); + builder.setState(Metapb.StoreState.Up); + builder.setRaftUrl(response.getRaftAddress()); + builder.setDataPath(response.getDatePath()); + builder.setGrpcUrl(response.getGrpcAddress()); + builder.setRestUrl(response.getRestAddress()); + members.add(builder.build()); + } + } 
catch (Exception e) { + log.error("failed to getGrpcAddress of {}. {}", + peerId.getEndpoint().toString(), e); + builder.setState(Metapb.StoreState.Offline); + builder.setRaftUrl(peerId.getEndpoint().toString()); + members.add(builder.build()); + } + + } + return members; + } + + public Status changePeerList(String peerList) { + AtomicReference result = new AtomicReference<>(); + try { + String[] peers = peerList.split(",", -1); + if ((peers.length & 1) != 1) { + throw new PDException(-1, "the number of peer list must be odd."); + } + Configuration newPeers = new Configuration(); + newPeers.parse(peerList); + CountDownLatch latch = new CountDownLatch(1); + this.raftNode.changePeers(newPeers, status -> { + result.set(status); + latch.countDown(); + }); + latch.await(); + } catch (Exception e) { + log.error("failed to changePeerList to {},{}", peerList, e); + result.set(new Status(-1, e.getMessage())); + } + return result.get(); + } + + public PeerId waitingForLeader(long timeOut){ + PeerId leader = getLeader(); + if ( leader != null ) { + return leader; + } + + synchronized (this) { + leader = getLeader(); + long start = System.currentTimeMillis(); + while ((System.currentTimeMillis() - start < timeOut) && (leader == null)) { + try { + this.wait(1000); + } catch (InterruptedException e) { + log.error("Raft wait for leader exception", e); + } + leader = getLeader(); + } + return leader != null ? 
leader : null; + } + + } + + public Node getRaftNode() { + return raftNode; + } + + public List getPeerGrpcAddresses() throws PDException { + try { + List peers = raftNode.listPeers(); + peers.addAll(raftNode.listLearners()); + ArrayList addresses = new ArrayList<>(peers.size()); + for (PeerId id : peers) { + CompletableFuture future = + raftRpcClient.getGrpcAddress(id.getEndpoint().toString()); + try { + String grpcAddress = future.get().getGrpcAddress(); + if (!StringUtils.isEmpty(grpcAddress)) { + addresses.add(grpcAddress); + } + } catch (Exception e) { + log.warn("get grpc address of peer: {} with error:", id, e); + } + } + return addresses; + } catch (Exception e) { + throw new PDException(ErrorType.ERROR, e); + } + } + + public List getPeerGrpcAddressesByCache() throws PDException { + try { + List peers = raftNode.listPeers(); + peers.addAll(raftNode.listLearners()); + ArrayList addresses = new ArrayList<>(peers.size()); + String grpcAddress; + for (PeerId id : peers) { + String raftAddress = id.getEndpoint().toString(); + grpcAddress = grpcAddresses.get(raftAddress); + if (grpcAddress != null) { + addresses.add(grpcAddress); + } else { + CompletableFuture future = raftRpcClient.getGrpcAddress(raftAddress); + try { + grpcAddress = future.get().getGrpcAddress(); + if (!StringUtils.isEmpty(grpcAddress)) { + grpcAddresses.put(raftAddress, grpcAddress); + addresses.add(grpcAddress); + } + } catch (Exception e) { + log.warn("get grpc address of peer: {} with error:", id, e); + } + } + } + return addresses; + } catch (Exception e) { + throw new PDException(ErrorType.ERROR, e); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java new file mode 100644 index 0000000000..e5ee055cde --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java @@ -0,0 +1,92 @@ +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.raft; + +import java.util.concurrent.CompletableFuture; +======== +package org.apache.hugegraph.pd.raft; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.option.RpcOptions; +import com.alipay.sofa.jraft.rpc.InvokeCallback; +import com.alipay.sofa.jraft.rpc.InvokeContext; +import com.alipay.sofa.jraft.rpc.RaftRpcFactory; +import com.alipay.sofa.jraft.rpc.RpcClient; +import com.alipay.sofa.jraft.util.Endpoint; +import com.alipay.sofa.jraft.util.RpcFactoryHelper; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class RaftRpcClient { + + protected volatile RpcClient rpcClient; + private RpcOptions rpcOptions; + + public synchronized boolean init(final RpcOptions rpcOptions) { + this.rpcOptions = rpcOptions; + final RaftRpcFactory factory = RpcFactoryHelper.rpcFactory(); + this.rpcClient = + factory.createRpcClient(factory.defaultJRaftClientConfigHelper(this.rpcOptions)); 
+ return this.rpcClient.init(null); + } + + /** + * Request a snapshot + */ + public CompletableFuture + getGrpcAddress(final String address) { + RaftRpcProcessor.GetMemberRequest request = new RaftRpcProcessor.GetMemberRequest(); + FutureClosureAdapter response = + new FutureClosureAdapter<>(); + internalCallAsyncWithRpc(JRaftUtils.getEndPoint(address), request, response); + return response.future; + } + + private void internalCallAsyncWithRpc(final Endpoint endpoint, + final RaftRpcProcessor.BaseRequest request, + final FutureClosureAdapter closure) { + final InvokeContext invokeCtx = new InvokeContext(); + final InvokeCallback invokeCallback = new InvokeCallback() { + + @Override + public void complete(final Object result, final Throwable err) { + if (err == null) { + final RaftRpcProcessor.BaseResponse response = + (RaftRpcProcessor.BaseResponse) result; + closure.setResponse((V) response); + } else { + closure.failure(err); + closure.run(new Status(-1, err.getMessage())); + } + } + }; + + try { + this.rpcClient.invokeAsync(endpoint, request, invokeCtx, invokeCallback, + this.rpcOptions.getRpcDefaultTimeout()); + } catch (final Throwable t) { + log.error("failed to call rpc to {}. {}", endpoint, t.getMessage()); + closure.failure(t); + closure.run(new Status(-1, t.getMessage())); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java new file mode 100644 index 0000000000..212dfefbc9 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java @@ -0,0 +1,134 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +======== +package org.apache.hugegraph.pd.raft; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java + +package org.apache.hugegraph.pd.raft; + +import java.io.Serializable; + +import com.alipay.sofa.jraft.rpc.RpcContext; +import com.alipay.sofa.jraft.rpc.RpcProcessor; +import com.alipay.sofa.jraft.rpc.RpcServer; + +import lombok.Data; + +public class RaftRpcProcessor implements RpcProcessor { + + private final Class requestClass; + private final RaftEngine raftEngine; + + public RaftRpcProcessor(Class requestClass, RaftEngine raftEngine) { + this.requestClass = requestClass; + this.raftEngine = raftEngine; + } + + public static void registerProcessor(final RpcServer rpcServer, RaftEngine raftEngine) { + rpcServer.registerProcessor(new RaftRpcProcessor<>(GetMemberRequest.class, raftEngine)); + } + + @Override + public void handleRequest(RpcContext rpcCtx, T request) { + if (request.magic() == BaseRequest.GET_GRPC_ADDRESS) { + rpcCtx.sendResponse(getGrpcAddress()); + } + } + + @Override + public String interest() { + return this.requestClass.getName(); + } + + private GetMemberResponse getGrpcAddress() { + GetMemberResponse rep = new GetMemberResponse(); + rep.setGrpcAddress(raftEngine.getConfig().getGrpcAddress()); + rep.setClusterId(raftEngine.getConfig().getClusterId()); + 
rep.setDatePath(raftEngine.getConfig().getDataPath()); + rep.setRaftAddress(raftEngine.getConfig().getAddress()); + rep.setRestAddress( + raftEngine.getConfig().getHost() + ":" + raftEngine.getConfig().getPort()); + rep.setStatus(Status.OK); + return rep; + } + + public enum Status implements Serializable { + UNKNOWN(-1, "unknown"), + OK(0, "ok"), + COMPLETE(0, "Transmission completed"), + INCOMPLETE(1, "Incomplete transmission"), + NO_PARTITION(10, "Partition not found"), + IO_ERROR(11, "io error"), + EXCEPTION(12, "exception"), + ABORT(100, "Transmission aborted"); + + private final int code; + private String msg; + + Status(int code, String msg) { + this.code = code; + this.msg = msg; + } + + public int getCode() { + return this.code; + } + + public Status setMsg(String msg) { + this.msg = msg; + return this; + } + + public boolean isOK() { + return this.code == 0; + } + } + + public abstract static class BaseRequest implements Serializable { + + public static final byte GET_GRPC_ADDRESS = 0x01; + + public abstract byte magic(); + } + + @Data + public abstract static class BaseResponse implements Serializable { + + private Status status; + + } + + @Data + public static class GetMemberRequest extends BaseRequest { + + @Override + public byte magic() { + return GET_GRPC_ADDRESS; + } + } + + @Data + public static class GetMemberResponse extends BaseResponse { + + private long clusterId; + private String raftAddress; + private String grpcAddress; + private String datePath; + private String restAddress; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java new file mode 100644 index 0000000000..c36c553a7f --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateListener.java @@ -0,0 +1,5 @@ +package org.apache.hugegraph.pd.raft; + +public interface RaftStateListener { + void onRaftLeaderChanged(); +} diff --git 
a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java new file mode 100644 index 0000000000..f7670f59c7 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java @@ -0,0 +1,409 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +package org.apache.hugegraph.pd.raft; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicLong; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +import java.util.zip.Checksum; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +======== +import java.util.concurrent.locks.ReentrantLock; +import java.util.zip.Checksum; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.commons.io.FileUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +import org.springframework.util.CollectionUtils; + +import com.alipay.sofa.jraft.Closure; +import com.alipay.sofa.jraft.Iterator; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.core.StateMachineAdapter; +import com.alipay.sofa.jraft.entity.LeaderChangeContext; +import com.alipay.sofa.jraft.entity.LocalFileMetaOutter; +import com.alipay.sofa.jraft.error.RaftError; +import com.alipay.sofa.jraft.error.RaftException; +import com.alipay.sofa.jraft.storage.snapshot.SnapshotReader; +import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter; +import com.alipay.sofa.jraft.util.CRC64; +import com.alipay.sofa.jraft.util.Utils; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +======== +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.service.MetadataService; +>>>>>>>> 
d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class RaftStateMachine extends StateMachineAdapter { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +======== + private List taskHandlers; + private List stateListeners; + private ReentrantLock lock = new ReentrantLock(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + + private static final String SNAPSHOT_DIR_NAME = "snapshot"; + private static final String SNAPSHOT_ARCHIVE_NAME = "snapshot.zip"; + private final AtomicLong leaderTerm = new AtomicLong(-1); + private final List taskHandlers; + private final List stateListeners; + + public RaftStateMachine() { + this.taskHandlers = new CopyOnWriteArrayList<>(); + this.stateListeners = new CopyOnWriteArrayList<>(); + } + + public void addTaskHandler(RaftTaskHandler handler) { + taskHandlers.add(handler); + } + + public void addStateListener(RaftStateListener listener) { + stateListeners.add(listener); + } + + public boolean isLeader() { + return this.leaderTerm.get() > 0; + } + + @Override + public void onApply(Iterator iter) { + while (iter.hasNext()) { + final RaftClosureAdapter done = (RaftClosureAdapter) iter.done(); + try { + KVOperation kvOp; + if (done != null) { + kvOp = done.op; + } else { + kvOp = KVOperation.fromByteArray(iter.getData().array()); + } + for (RaftTaskHandler taskHandler : taskHandlers) { + taskHandler.invoke(kvOp, done); + } + if (done != null) { + done.run(Status.OK()); + } + } catch (Throwable t) { + log.error("StateMachine encountered critical error", t); + if (done != null) { + done.run(new Status(RaftError.EINTERNAL, t.getMessage())); + } + } + iter.next(); + } + } + + @Override + public void onError(final RaftException e) { + log.error("Raft StateMachine encountered an error", e); + } + + @Override + 
public void onShutdown() { + super.onShutdown(); + } + + @Override + public void onLeaderStart(final long term) { + this.leaderTerm.set(term); + super.onLeaderStart(term); + + log.info("Raft becomes leader"); + Utils.runInThread(() -> { + if (!CollectionUtils.isEmpty(stateListeners)) { + stateListeners.forEach(RaftStateListener::onRaftLeaderChanged); + } + }); + } + + @Override + public void onLeaderStop(final Status status) { + this.leaderTerm.set(-1); + super.onLeaderStop(status); + log.info("Raft lost leader "); + } + + @Override + public void onStartFollowing(final LeaderChangeContext ctx) { + super.onStartFollowing(ctx); + Utils.runInThread(() -> { + if (!CollectionUtils.isEmpty(stateListeners)) { + stateListeners.forEach(RaftStateListener::onRaftLeaderChanged); + } + }); + } + + @Override + public void onStopFollowing(final LeaderChangeContext ctx) { + super.onStopFollowing(ctx); + } + + @Override + public void onConfigurationCommitted(final Configuration conf) { + log.info("Raft onConfigurationCommitted {}", conf); + } + + @Override + public void onSnapshotSave(final SnapshotWriter writer, final Closure done) { + MetadataService.getUninterruptibleJobs().submit(() -> { + lock.lock(); + try { + log.info("start snapshot save"); + String snapshotDir = writer.getPath() + File.separator + SNAPSHOT_DIR_NAME; + try { + FileUtils.deleteDirectory(new File(snapshotDir)); + FileUtils.forceMkdir(new File(snapshotDir)); + } catch (IOException e) { + log.error("Failed to create snapshot directory {}", snapshotDir); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + log.error("Raft onSnapshotSave failed. 
{}", e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + + // compress + try { + compressSnapshot(writer); + FileUtils.deleteDirectory(new File(snapshotDir)); + } catch (Exception e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + done.run(Status.OK()); +======== + for (RaftTaskHandler taskHandler : taskHandlers) { + try { + KVOperation op = KVOperation.createSaveSnapshot(snapshotDir); + taskHandler.invoke(op, null); + log.info("Raft onSnapshotSave success"); + } catch (PDException e) { + log.error("Raft onSnapshotSave failed. {}", e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + } + } + // compress + try { + compressSnapshot(writer); + FileUtils.deleteDirectory(new File(snapshotDir)); + } catch (Exception e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + done.run(Status.OK()); + log.info("snapshot save done"); + } catch (Exception e) { + log.error("failed to save snapshot", e); + done.run(new Status(RaftError.EIO, e.toString())); + } finally { + lock.unlock(); + } + }); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + } + + @Override + public boolean onSnapshotLoad(final SnapshotReader reader) { + if (isLeader()) { + log.warn("Leader is not supposed to load snapshot"); + return false; + } + lock.lock(); + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + decompressSnapshot(reader); + } catch (PDException e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + return true; + } + + CountDownLatch latch = new CountDownLatch(taskHandlers.size()); + for (RaftTaskHandler taskHandler : taskHandlers) { +======== + String snapshotDir = 
reader.getPath() + File.separator + SNAPSHOT_DIR_NAME; + String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; + // 2. decompress snapshot archive +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + try { + decompressSnapshot(reader); + } catch (PDException e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + return true; + } + + CountDownLatch latch = new CountDownLatch(taskHandlers.size()); + for (RaftTaskHandler taskHandler : taskHandlers) { + try { + KVOperation op = KVOperation.createLoadSnapshot(snapshotDir); + taskHandler.invoke(op, null); + log.info("Raft onSnapshotLoad success"); + latch.countDown(); + } catch (PDException e) { + log.error("Raft onSnapshotLoad failed. {}", e.toString()); + return false; + } + } + try { + latch.await(); + } catch (InterruptedException e) { + log.error("Raft onSnapshotSave failed. {}", e.toString()); + return false; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + try { + // TODO: remove file from meta + FileUtils.deleteDirectory(new File(snapshotDir)); + File file = new File(snapshotArchive); + if (file.exists()) { + FileUtils.forceDelete(file); + } + } catch (IOException e) { + log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, + snapshotArchive); +======== + + try { + // TODO: remove file from meta + // SnapshotReader 沒有提供刪除文件的接口 + FileUtils.deleteDirectory(new File(snapshotDir)); + // File file = new File(snapshotArchive); + // if (file.exists()) { + // FileUtils.forceDelete(file); + // } + } catch (IOException e) { + log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, snapshotArchive); + return false; + } + return true; + } catch (Exception e) { + log.error("load snapshot with error:", e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + return false; + } finally { + lock.unlock(); + } + } + + private void compressSnapshot(final SnapshotWriter writer) throws PDException { + final Checksum checksum = new CRC64(); + final String snapshotArchive = writer.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; + try { + ZipUtils.compress(writer.getPath(), SNAPSHOT_DIR_NAME, snapshotArchive, checksum); + LocalFileMetaOutter.LocalFileMeta.Builder metaBuild = + LocalFileMetaOutter.LocalFileMeta.newBuilder(); + metaBuild.setChecksum(Long.toHexString(checksum.getValue())); + if (!writer.addFile(SNAPSHOT_ARCHIVE_NAME, metaBuild.build())) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, + "failed to add file to LocalFileMeta"); +======== + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, "failed to add file to LocalFileMeta"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + } + } catch (IOException e) { + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); + } + } + + private void decompressSnapshot(final SnapshotReader reader) throws PDException { + final LocalFileMetaOutter.LocalFileMeta meta = + (LocalFileMetaOutter.LocalFileMeta) reader.getFileMeta(SNAPSHOT_ARCHIVE_NAME); + final Checksum checksum = new CRC64(); + final String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; + try { + ZipUtils.decompress(snapshotArchive, new File(reader.getPath()), checksum); + if (meta.hasChecksum()) { + if (!meta.getChecksum().equals(Long.toHexString(checksum.getValue()))) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, + "Snapshot checksum failed"); 
+======== + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, "Snapshot checksum failed"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java + } + } + } catch (IOException e) { + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + } + } + + public static class RaftClosureAdapter implements KVStoreClosure { + + private final KVOperation op; + private final KVStoreClosure closure; + + public RaftClosureAdapter(KVOperation op, KVStoreClosure closure) { + this.op = op; + this.closure = closure; + } + + public KVStoreClosure getClosure() { + return closure; + } + + @Override + public void run(Status status) { + closure.run(status); + } + + @Override + public Errors getError() { + return null; + } + + @Override + public void setError(Errors error) { + + } + + @Override + public Object getData() { + return null; + } + + @Override + public void setData(Object data) { + + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java new file mode 100644 index 0000000000..c897ed138e --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftTaskHandler.java @@ -0,0 +1,10 @@ +package org.apache.hugegraph.pd.raft; + +import org.apache.hugegraph.pd.common.PDException; + +/** + * 接收raft发送的数据 + */ +public interface RaftTaskHandler { + boolean invoke(final KVOperation op, KVStoreClosure response) throws PDException; +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java new file mode 100644 index 0000000000..13f25347e1 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/ZipUtils.java @@ -0,0 +1,64 @@ +package org.apache.hugegraph.pd.raft; + +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import 
org.apache.commons.io.IOUtils; +import org.apache.commons.io.output.NullOutputStream; + +import java.io.*; +import java.nio.file.Paths; +import java.util.zip.*; + +@Slf4j +public final class ZipUtils { + + public static void compress(final String rootDir, final String sourceDir, + final String outputFile, final Checksum checksum) throws IOException { + try (final FileOutputStream fos = new FileOutputStream(outputFile); + final CheckedOutputStream cos = new CheckedOutputStream(fos, checksum); + final ZipOutputStream zos = new ZipOutputStream(new BufferedOutputStream(cos))) { + ZipUtils.compressDirectoryToZipFile(rootDir, sourceDir, zos); + zos.flush(); + fos.getFD().sync(); + } + } + + private static void compressDirectoryToZipFile(final String rootDir, final String sourceDir, + final ZipOutputStream zos) throws IOException { + final String dir = Paths.get(rootDir, sourceDir).toString(); + final File[] files = new File(dir).listFiles(); + for (final File file : files) { + final String child = Paths.get(sourceDir, file.getName()).toString(); + if (file.isDirectory()) { + compressDirectoryToZipFile(rootDir, child, zos); + } else { + zos.putNextEntry(new ZipEntry(child)); + try (final FileInputStream fis = new FileInputStream(file); + final BufferedInputStream bis = new BufferedInputStream(fis)) { + IOUtils.copy(bis, zos); + } + } + } + } + + public static void decompress(final String sourceFile, final String outputDir, + final Checksum checksum) throws IOException { + try (final FileInputStream fis = new FileInputStream(sourceFile); + final CheckedInputStream cis = new CheckedInputStream(fis, checksum); + final ZipInputStream zis = new ZipInputStream(new BufferedInputStream(cis))) { + ZipEntry entry; + while ((entry = zis.getNextEntry()) != null) { + final String fileName = entry.getName(); + final File entryFile = new File(Paths.get(outputDir, fileName).toString()); + FileUtils.forceMkdir(entryFile.getParentFile()); + try (final FileOutputStream fos = new 
FileOutputStream(entryFile); + final BufferedOutputStream bos = new BufferedOutputStream(fos)) { + IOUtils.copy(zis, bos); + bos.flush(); + fos.getFD().sync(); + } + } + IOUtils.copy(cis, NullOutputStream.NULL_OUTPUT_STREAM); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/service/MetadataService.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/service/MetadataService.java new file mode 100644 index 0000000000..0ecd7b7a5e --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/service/MetadataService.java @@ -0,0 +1,209 @@ +package org.apache.hugegraph.pd.service; + +import static org.apache.hugegraph.pd.grpc.Metapb.Graph; +import static org.apache.hugegraph.pd.grpc.Metapb.GraphSpace; +import static org.apache.hugegraph.pd.grpc.Metapb.Partition; +import static org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; +import static org.apache.hugegraph.pd.grpc.Metapb.Store; + +import java.util.List; +import java.util.concurrent.ThreadPoolExecutor; + +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.meta.PartitionMeta; +import org.apache.hugegraph.pd.meta.StoreInfoMeta; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.consts.PoolNames; +import org.apache.hugegraph.pd.grpc.GraphSpaces; +import org.apache.hugegraph.pd.grpc.Graphs; +import org.apache.hugegraph.pd.grpc.Partitions; +import org.apache.hugegraph.pd.grpc.ShardGroups; +import org.apache.hugegraph.pd.grpc.Stores; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.util.ExecutorUtil; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2023/9/20 + **/ +@Slf4j +@Service +public 
class MetadataService extends MetadataRocksDBStore { + + private static ThreadPoolExecutor uninterruptibleJobs; + private static int cpus = Runtime.getRuntime().availableProcessors(); + private StoreInfoMeta store; + private PartitionMeta partition; + + public MetadataService(@Autowired PDConfig config) { + super(config); + store = MetadataFactory.newStoreInfoMeta(config); + partition = MetadataFactory.newPartitionMeta(config); + try { + if (uninterruptibleJobs == null) { + PDConfig.JobConfig jobConfig = config.getJobConfig(); + int uninterruptibleCore = jobConfig.getUninterruptibleCore(); + if (uninterruptibleCore <= 0) { + uninterruptibleCore = cpus / 2; + } + uninterruptibleJobs = ExecutorUtil.createExecutor(PoolNames.U_JOB, + uninterruptibleCore, + jobConfig.getUninterruptibleMax(), + jobConfig.getUninterruptibleQueueSize(), + false); + } + } catch (Exception e) { + log.error("an error occurred while creating the background job thread pool", e); + } + } + + /** + * + */ + public Stores getStores() throws PDException { + Stores.Builder builder = Stores.newBuilder(); + try { + List data = store.getStores(""); + builder.addAllData(data); + } catch (Exception e) { + throw e; + } + return builder.build(); + } + + /** + * + */ + public Partitions getPartitions() throws PDException { + Partitions.Builder builder = Partitions.newBuilder(); + try { + List data = partition.getPartitions(); + builder.addAllData(data); + } catch (Exception e) { + throw e; + } + return builder.build(); + } + + /** + * + */ + public ShardGroups getShardGroups() throws PDException { + ShardGroups.Builder builder = ShardGroups.newBuilder(); + try { + List data = store.getShardGroups(); + builder.addAllData(data); + } catch (Exception e) { + throw e; + } + return builder.build(); + } + + /** + * + */ + public GraphSpaces getGraphSpaces() throws PDException { + GraphSpaces.Builder builder = GraphSpaces.newBuilder(); + try { + byte[] prefix = MetadataKeyHelper.getGraphSpaceKey(""); + List data = 
scanPrefix(GraphSpace.parser(), prefix); + builder.addAllData(data); + } catch (Exception e) { + throw e; + } + return builder.build(); + } + + /** + * + */ + public Graphs getGraphs() throws PDException { + Graphs.Builder builder = Graphs.newBuilder(); + try { + List data = partition.getGraphs(); + builder.addAllData(data); + } catch (Exception e) { + throw e; + } + return builder.build(); + } + + /** + * + */ + public boolean updateStore(Store request) throws PDException { + try { + store.updateStore(request); + return true; + } catch (PDException e) { + throw e; + } + } + + /** + * + */ + public boolean updatePartition(Partition request) throws PDException { + try { + partition.updatePartition(request); + return true; + } catch (Exception e) { + throw e; + } + } + + /** + * + */ + public boolean updateShardGroup(ShardGroup request) throws PDException { + try { + store.updateShardGroup(request); + return true; + } catch (Exception e) { + throw e; + } + } + + /** + * + */ + public boolean updateGraphSpace(GraphSpace request) throws PDException { + try { + byte[] key = MetadataKeyHelper.getGraphSpaceKey(request.getName()); + put(key, request.toByteArray()); + return true; + } catch (Exception e) { + throw e; + } + } + + /** + * + */ + public boolean updateGraph(Graph request) throws PDException { + try { + byte[] key = MetadataKeyHelper.getGraphKey(request.getGraphName()); + put(key, request.toByteArray()); + return true; + } catch (Exception e) { + throw e; + } + } + + public List getPeerGrpcAddresses() throws PDException { + return RaftEngine.getInstance().getPeerGrpcAddresses(); + } + + public static ThreadPoolExecutor getUninterruptibleJobs() { + return uninterruptibleJobs; + } + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java new file mode 100644 index 0000000000..2c143f380c --- /dev/null +++ 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/BaseKVStoreClosure.java @@ -0,0 +1,32 @@ +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.Status; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.raft.KVStoreClosure; + +public abstract class BaseKVStoreClosure implements KVStoreClosure { + private Errors error; + private Object data; + @Override + public Errors getError() { + return error; + } + + @Override + public void setError(Errors error) { + this.error = error; + } + + @Override + public Object getData() { + return data; + } + + @Override + public void setData(Object data) { + this.data = data; + } + + +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java new file mode 100644 index 0000000000..3a0ea09f22 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStore.java @@ -0,0 +1,40 @@ +package org.apache.hugegraph.pd.store; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +public interface HgKVStore { + void init(PDConfig config); + + void put(byte[] key, byte[] value) throws PDException; + + byte[] get(byte[] key) throws PDException; + + List scanPrefix(byte[] prefix); + + long remove(byte[] bytes) throws PDException; + + long removeByPrefix(byte[] bytes) throws PDException; + + void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException; + + void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws PDException; + + byte[] getWithTTL(byte[] key) throws PDException; + void removeWithTTL(byte[] key) throws PDException; + + List getListWithTTL(byte[] key) throws PDException; + + void clear() throws PDException; + + void saveSnapshot(String snapshotPath) throws PDException; + + 
void loadSnapshot(String snapshotPath) throws PDException; + + List scanRange(byte[] start,byte[] end); + + void close(); +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java new file mode 100644 index 0000000000..8f6dab6466 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java @@ -0,0 +1,375 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.store; +======== +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.util.Utils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import com.google.common.cache.CacheBuilder; +import com.google.common.primitives.Bytes; + +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.rocksdb.*; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.rocksdb.Checkpoint; +import org.rocksdb.Options; +import org.rocksdb.ReadOptions; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; +import org.rocksdb.Slice; + +import com.alipay.sofa.jraft.util.Utils; +import com.google.common.cache.CacheBuilder; +import com.google.common.primitives.Bytes; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class HgKVStoreImpl implements HgKVStore { + + private static final ConcurrentHashMap> CACHE = new ConcurrentHashMap(); + private final ReadWriteLock 
readWriteLock = new ReentrantReadWriteLock(); + private RocksDB db; + private String dbPath; + private Options dbOptions; + + @Override + public void init(PDConfig config) { + dbOptions = new Options().setCreateIfMissing(true); + + final Lock writeLock = this.readWriteLock.writeLock(); + writeLock.lock(); + try { + this.dbPath = config.getDataPath() + "/rocksdb/"; + File file = new File(this.dbPath); + if (!file.exists()) { + try { + FileUtils.forceMkdir(file); + } catch (IOException e) { + log.warn("Failed to create data file,{}", e); + } + } + openRocksDB(dbPath); + } catch (PDException e) { + log.error("Failed to open data file,{}", e); + } finally { + writeLock.unlock(); + } + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try { + db.put(key, value); + } catch (RocksDBException e) { + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + } + + @Override + public byte[] get(byte[] key) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try { + return db.get(key); + } catch (RocksDBException e) { + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + } + + @Override + public List scanPrefix(byte[] prefix) { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try (ReadOptions options = new ReadOptions() + .setIterateLowerBound(new Slice(prefix))) { + List kvs = new ArrayList<>(); + RocksIterator iterator = db.newIterator(options); + iterator.seekToFirst(); + while (iterator.isValid() && 0 == Bytes.indexOf(iterator.key(), prefix)) { + kvs.add(new KV(iterator.key(), iterator.value())); + iterator.next(); + } + return kvs; + } finally { + readLock.unlock(); + } + } + + @Override + public long remove(byte[] key) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + 
readLock.lock(); + try { + db.delete(key); + } catch (RocksDBException e) { + throw new PDException(ErrorType.ROCKSDB_DEL_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + return 0; + } + + @Override + public long removeByPrefix(byte[] prefix) throws PDException { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try (ReadOptions options = new ReadOptions() + .setIterateLowerBound(new Slice(prefix))) { + RocksIterator iterator = db.newIterator(options); + iterator.seekToFirst(); + + while (iterator.isValid()) { + if (0 == Bytes.indexOf(iterator.key(), prefix)) { + db.delete(iterator.key()); + } else { + break; + } + iterator.next(); + } + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } finally { + readLock.unlock(); + } + return 0; + } + + @Override + public void clear() throws PDException { + CACHE.clear(); + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + String storeKey = new String(key, Charset.defaultCharset()); + LinkedList ts = new LinkedList<>(); + CACHE.keySet().forEach((cacheKey) -> { + if (cacheKey.startsWith(storeKey)) { + ConcurrentMap map; + if ((map = CACHE.get(cacheKey)) == null) { + return; + } + map.values().forEach((element) -> { + ts.add((byte[]) element); + }); + } + }); + return ts; + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + ConcurrentMap map; + String storeKey = new String(key, Charset.defaultCharset()); + if ((map = CACHE.get(storeKey)) == null) { + return null; + } + Object value = map.get(storeKey); + return value == null ? 
null : (byte[]) value; + } + + @Override + public void removeWithTTL(byte[] key) throws PDException { + ConcurrentMap map; + String storeKey = new String(key, Charset.defaultCharset()); + if ((map = CACHE.get(storeKey)) == null) { + return; + } + map.remove(storeKey); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.putWithTTL(key, value, ttl, TimeUnit.SECONDS); + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { + try { + ConcurrentMap spaceNode = CacheBuilder.newBuilder().initialCapacity(200) + .expireAfterWrite(ttl, + timeUnit) + .build().asMap(); + String storeKey = new String(key, Charset.defaultCharset()); + ConcurrentMap space = CACHE.putIfAbsent(storeKey, spaceNode); + if (space == null) { + space = spaceNode; + } + space.put(storeKey, value); + } catch (Exception e) { + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + } + } + + @Override + public void saveSnapshot(String snapshotPath) throws PDException { + log.info("begin save snapshot at {}", snapshotPath); + final Lock writeLock = this.readWriteLock.writeLock(); + writeLock.lock(); + try (final Checkpoint checkpoint = Checkpoint.create(this.db)) { + final String tempPath = Paths.get(snapshotPath) + "_temp"; + final File tempFile = new File(tempPath); + FileUtils.deleteDirectory(tempFile); + checkpoint.createCheckpoint(tempPath); + final File snapshotFile = new File(snapshotPath); + FileUtils.deleteDirectory(snapshotFile); + if (!Utils.atomicMoveFile(tempFile, snapshotFile, true)) { + log.error("Fail to rename {} to {}", tempPath, snapshotPath); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java + throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", tempPath, + snapshotPath)); +======== + throw new 
PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", tempPath, snapshotPath)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java + } + } catch (final PDException e) { + throw e; + } catch (final Exception e) { + log.error("Fail to write snapshot at path: {}", snapshotPath, e); + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); + } finally { + writeLock.unlock(); + } + log.info("saved snapshot into {}", snapshotPath); + } + + @Override + public void loadSnapshot(String snapshotPath) throws PDException { + log.info("begin load snapshot from {}", snapshotPath); + final Lock writeLock = this.readWriteLock.writeLock(); + writeLock.lock(); + try { + final File snapshotFile = new File(snapshotPath); + if (!snapshotFile.exists()) { + log.error("Snapshot file {} not exists.", snapshotPath); + return; + } + // close DB + closeRocksDB(); + // replace rocksdb data with snapshot data + final File dbFile = new File(this.dbPath); + FileUtils.deleteDirectory(dbFile); + if (!Utils.atomicMoveFile(snapshotFile, dbFile, true)) { + log.error("Fail to rename {} to {}", snapshotPath, this.dbPath); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java + throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", snapshotPath, + this.dbPath)); +======== + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", snapshotPath, this.dbPath)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java + } + // reopen the db + openRocksDB(this.dbPath); + } catch (final PDException e) { + throw e; + } catch (final Exception e) { + log.error("failed to load snapshot from {}", snapshotPath); + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, 
e); + } finally { + writeLock.unlock(); + } + log.info("loaded snapshot from {}", snapshotPath); + } + + @Override + public List scanRange(byte[] start, byte[] end) { + final Lock readLock = this.readWriteLock.readLock(); + readLock.lock(); + try (ReadOptions options = new ReadOptions() + .setIterateLowerBound(new Slice(start)) + .setIterateUpperBound(new Slice(end))) { + List kvs = new ArrayList<>(); + RocksIterator iterator = db.newIterator(options); + iterator.seekToFirst(); + while (iterator.isValid()) { + kvs.add(new KV(iterator.key(), iterator.value())); + iterator.next(); + } + return kvs; + } finally { + readLock.unlock(); + } + } + + @Override + public void close() { + closeRocksDB(); + } + + private void closeRocksDB() { + if (this.db != null) { + try { + this.db.syncWal(); + } catch (RocksDBException e) { + log.warn("exception ", e); + } + this.db.close(); + this.db = null; + } + } + + private void openRocksDB(String dbPath) throws PDException { + try { + this.db = RocksDB.open(dbOptions, dbPath); + } catch (RocksDBException e) { + log.error("Failed to open RocksDB from {}", dbPath, e); + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + } + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java new file mode 100644 index 0000000000..a7f39ffc07 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/KV.java @@ -0,0 +1,27 @@ +package org.apache.hugegraph.pd.store; + + +public class KV { + private byte[] key; + private byte[] value; + + public KV(byte[] key, byte[] value){ + this.key = key; + this.value = value; + } + public void setKey(byte[] key) { + this.key = key; + } + + public void setValue(byte[] value) { + this.value = value; + } + + public byte[] getKey() { + return key; + } + + public byte[] getValue() { + return value; + } +} diff --git a/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java 
b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java new file mode 100644 index 0000000000..5af8d6b226 --- /dev/null +++ b/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java @@ -0,0 +1,395 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.store; +======== +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.raft.KVOperation; +import org.apache.hugegraph.pd.raft.KVStoreClosure; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftTaskHandler; +import lombok.extern.slf4j.Slf4j; +import org.apache.hugegraph.pd.raft.RaftStateMachine.RaftClosureAdapter; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.raft.KVOperation; +import org.apache.hugegraph.pd.raft.KVStoreClosure; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateMachine; +import org.apache.hugegraph.pd.raft.RaftTaskHandler; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class RaftKVStore implements HgKVStore, RaftTaskHandler { + + private final RaftEngine engine; + private final HgKVStore store; + + public RaftKVStore(RaftEngine engine, HgKVStore store) { + this.engine = engine; + this.store = store; + } + + @Override + public void init(PDConfig config) { + this.store.init(config); + this.engine.addTaskHandler(this); + } + + private BaseKVStoreClosure 
createClosure() { + return new BaseKVStoreClosure() { + @Override + public void run(Status status) { + if (!status.isOk()) { + log.error("An exception occurred while performing the RAFT,{}", + status.getErrorMsg()); + } else { + log.info("RAFT done!"); + } + } + }; + } + + @Override + public void put(byte[] key, byte[] value) throws PDException { + KVOperation operation = KVOperation.createPut(key, value); + try { + applyOperation(operation).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + } + + /** + * Queries can be read without rafting + */ + @Override + public byte[] get(byte[] key) throws PDException { + return store.get(key); + + } + + @Override + public List scanPrefix(byte[] prefix) { + return store.scanPrefix(prefix); + } + + @Override + public long remove(byte[] bytes) throws PDException { + try { + applyOperation(KVOperation.createRemove(bytes)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + return 0; + } + + @Override + public long removeByPrefix(byte[] bytes) throws PDException { + try { + applyOperation(KVOperation.createRemoveByPrefix(bytes)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new 
PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + return 0; + } + + @Override + public void clear() throws PDException { + try { + applyOperation(KVOperation.createClear()).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + try { + applyOperation(KVOperation.createPutWithTTL(key, value, ttl)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + } + + @Override + public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { + try { + applyOperation(KVOperation.createPutWithTTL(key, value, ttl, timeUnit)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + } + + @Override + public List getListWithTTL(byte[] key) throws PDException { + return store.getListWithTTL(key); + } + + @Override + public byte[] getWithTTL(byte[] key) throws PDException { + return store.getWithTTL(key); + } + + @Override + public void removeWithTTL(byte[] key) throws PDException { + try { + applyOperation(KVOperation.createRemoveWithTTL(key)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } catch (Exception e) { + throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + } + } + + @Override + public void saveSnapshot(String snapshotPath) throws PDException { + store.saveSnapshot(snapshotPath); + } + + @Override + public void loadSnapshot(String snapshotPath) throws PDException { + store.loadSnapshot(snapshotPath); + } + + @Override + public List scanRange(byte[] start, byte[] end) { + return store.scanRange(start, end); + } + + @Override + public void close() { + store.close(); + } + + /** + * Need to walk the real operation of Raft + */ + private void doPut(byte[] key, byte[] value) throws PDException { + + store.put(key, value); + } + + public long doRemove(byte[] bytes) throws PDException { + return this.store.remove(bytes); + } + + public long doRemoveByPrefix(byte[] bytes) throws PDException { + return this.store.removeByPrefix(bytes); + } + + public void doRemoveWithTTL(byte[] key) throws PDException { + this.store.removeWithTTL(key); + } + + public void doClear() throws PDException { + this.store.clear(); + } + + public void doPutWithTTL(byte[] key, byte[] value, long ttl) throws PDException { + this.store.putWithTTL(key, value, ttl); + } + + public void 
doPutWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) throws + PDException { + this.store.putWithTTL(key, value, ttl, timeUnit); + } + + public void doSaveSnapshot(String snapshotPath) throws PDException { + this.store.saveSnapshot(snapshotPath); + } + + public void doLoadSnapshot(String snapshotPath) throws PDException { + this.store.loadSnapshot(snapshotPath); + } + + private CompletableFuture applyOperation(final KVOperation op) throws PDException { + CompletableFuture future = new CompletableFuture<>(); + try { + final Task task = new Task(); + task.setData(ByteBuffer.wrap(op.toByteArray())); + task.setDone(new RaftClosureAdapter(op, new KVStoreClosure() { + Object data; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + Pdpb.Error error; + + @Override + public Pdpb.Error getError() { + return error; + } + + @Override + public void setError(Pdpb.Error error) { + this.error = error; + } + + @Override + public Object getData() { + return data; + } + + @Override +======== + Errors error; + @Override + public Errors getError() { + return error; + } + + @Override + public void setError(Errors error) { + this.error = error; + } + + @Override + public Object getData() { + return data; + } + + @Override +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java + public void setData(Object data) { + this.data = data; + } + + @Override + public void run(Status status) { + if (status.isOk()) { + future.complete((T) data); + } else { + RaftError raftError = status.getRaftError(); + ErrorType type; + if (RaftError.EPERM.equals(raftError)) { + type = ErrorType.NOT_LEADER; + } else { + type = ErrorType.UNKNOWN; + } + error = Errors.newBuilder().setType(type) + .setMessage(status.getErrorMsg()) + .build(); + future.completeExceptionally( + new PDException(error.getTypeValue())); + } + } + })); + this.engine.addTask(task); + return future; + } catch (Exception 
e) { + future.completeExceptionally(e); + return future; + } + } + + private boolean isLeader() { + return this.engine.isLeader(); + } + + @Override + public boolean invoke(KVOperation op, KVStoreClosure response) throws PDException { + switch (op.getOp()) { + case KVOperation.GET: + break; + case KVOperation.PUT: + doPut(op.getKey(), op.getValue()); + break; + case KVOperation.REMOVE: + doRemove(op.getKey()); + break; + case KVOperation.PUT_WITH_TTL: + doPutWithTTL(op.getKey(), op.getValue(), (long) op.getArg()); + break; + case KVOperation.PUT_WITH_TTL_UNIT: + Object[] arg = (Object[]) op.getArg(); + doPutWithTTL(op.getKey(), op.getValue(), (long) arg[0], (TimeUnit) arg[1]); + break; + case KVOperation.REMOVE_BY_PREFIX: + doRemoveByPrefix(op.getKey()); + break; + case KVOperation.REMOVE_WITH_TTL: + doRemoveWithTTL(op.getKey()); + break; + case KVOperation.CLEAR: + doClear(); + break; + case KVOperation.SAVE_SNAPSHOT: + doSaveSnapshot((String) op.getAttach()); + break; + case KVOperation.LOAD_SNAPSHOT: + doLoadSnapshot((String) op.getAttach()); + break; + default: + log.error("Err op {}", op.getOp()); + } + return false; + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java new file mode 100644 index 0000000000..6998018901 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/MonitorServiceTest.java @@ -0,0 +1,90 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.util.concurrent.ExecutionException; + +public class MonitorServiceTest { + static PDConfig pdConfig; + + @BeforeClass + public static void init() throws ExecutionException, InterruptedException { + pdConfig = new PDConfig() {{ + 
this.setClusterId(100); + this.setPatrolInterval(1); + }}; + + //pdConfig.setEtcd(new PDConfig().new Etcd() {{ + // this.setAddress("http://localhost:2379"); + // + //}}); + pdConfig.setStore(new PDConfig().new Store() {{ + this.setMaxDownTime(1); + this.setKeepAliveTimeout(5); + }}); + + pdConfig.setPartition(new PDConfig().new Partition() {{ + this.setShardCount(3); + this.setMaxShardsPerStore(12); + // this.setTotalCount(10); + }}); + + clearClusterData(); + } + + public static void clearClusterData() throws ExecutionException, InterruptedException { + //Client client = Client.builder().endpoints(pdConfig.getEtcd().getAddress()).build(); + //KV kvClient = client.getKVClient(); + // + //ByteSequence key = ByteSequence.from("HUGEGRAPH/" + pdConfig.getClusterId(), Charset.forName("utf-8")); + //CompletableFuture rsp = kvClient.delete(key, DeleteOption.newBuilder().isPrefix(true).build()); + //System.out.println("删除数量 : " + rsp.get().getDeleted()); + //kvClient.close(); + //client.close(); + } + + // @Test + public void testPatrolStores() throws PDException, InterruptedException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + ConfigService configService = new ConfigService(pdConfig); + PartitionService partitionService = new PartitionService(pdConfig, storeService, configService); + var monitorService = new TaskScheduleService(pdConfig, storeService, partitionService, configService); + storeService.init(partitionService); + partitionService.init(); + monitorService.init(); + + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); + } + Metapb.Graph graph = 
Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + + .setPartitionCount(10) + .build(); + partitionService.updateGraph(graph); + Thread.sleep(10000); + count = 0; + count += storeService.getStores("").stream().filter(store -> store.getState() == Metapb.StoreState.Tombstone).count(); + + Assert.assertEquals(6, count); + + } + + +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java new file mode 100644 index 0000000000..f6bb27977f --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/PartitionServiceTest.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class PartitionServiceTest { + @Test + public void testPartitionHeartbeat() { + List shardList = new ArrayList<>(); + shardList.add(Metapb.Shard.newBuilder().setStoreId(1).build()); + shardList.add(Metapb.Shard.newBuilder().setStoreId(2).build()); + shardList.add(Metapb.Shard.newBuilder().setStoreId(3).build()); + shardList = new ArrayList<>(shardList); + Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder() + .addAllShard(shardList).build(); + List shardList2 = new ArrayList<>(stats.getShardList()); + Collections.shuffle(shardList2); + shardList2.forEach(shard -> { + System.out.println(shard.getStoreId()); + }); + + + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java new file mode 100644 index 0000000000..78a13c3ea4 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/StoreNodeServiceTest.java @@ -0,0 +1,440 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import 
org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.listener.PartitionInstructionListener; +import org.apache.hugegraph.pd.listener.PartitionStatusListener; + +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; + +public class StoreNodeServiceTest { + static PDConfig pdConfig; + static ConfigService configService; + + @BeforeClass + public static void init() throws Exception { + String path = "tmp/unitTest"; + deleteDirectory(new File(path)); + pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + }}; + + pdConfig.setStore(new PDConfig().new Store() {{ + this.setMaxDownTime(3600); + this.setKeepAliveTimeout(3600); + }}); + + pdConfig.setPartition(new PDConfig().new Partition() {{ + this.setShardCount(3); + this.setMaxShardsPerStore(3); + }}); + pdConfig.setRaft(new PDConfig().new Raft(){{ + this.setEnable(false); + }}); + pdConfig.setDiscovery(new PDConfig().new Discovery()); + pdConfig.setDataPath(path); + ConfigService configService = new ConfigService(pdConfig); + pdConfig = configService.loadConfig(); + + configService = new ConfigService(pdConfig); + } + + + + // @Test + public void 
testStoreNodeService() throws PDException { + Assert.assertEquals(configService.getPartitionCount(0), + (long) pdConfig.getInitialStoreMap().size() * pdConfig.getPartition().getMaxShardsPerStore() + / pdConfig.getPartition().getShardCount()); + StoreNodeService storeService = new StoreNodeService(pdConfig); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + stores[i].getId()); + } + Assert.assertEquals(count, storeService.getStores("").size()); + + for (Metapb.Store store : stores) { + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() + .setStoreId(store.getId()) + .build(); + storeService.heartBeat(stats); + } + + Assert.assertEquals(6, storeService.getActiveStores("").size()); + + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + .setPartitionCount(10) + .build(); + // 分配shard + List shards = storeService.allocShards(graph, 1); + + + Assert.assertEquals(3, shards.size()); + + Assert.assertEquals(configService.getPartitionCount(0), storeService.getShardGroups().size()); + // 设置leader + Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0)) + .setRole(Metapb.ShardRole.Leader).build(); + shards = new ArrayList<>(shards); + shards.set(0, leader); + // 增加shard + pdConfig.getPartition().setShardCount(5); + + Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(1) + .addAllShards(shards).build(); + shards = storeService.reallocShards(shardGroup); + + Assert.assertEquals(5, shards.size()); + // 减少shard + pdConfig.getPartition().setShardCount(3); + shards = storeService.reallocShards(shardGroup); + Assert.assertEquals(3, shards.size()); + // 
包含leader,leader不能被删除 + Assert.assertTrue(shards.contains(leader)); + + // 减少shard + pdConfig.getPartition().setShardCount(1); + graph = Metapb.Graph.newBuilder(graph).build(); + shards = storeService.reallocShards(shardGroup); + Assert.assertEquals(1, shards.size()); + // 包含leader,leader不能被删除 + Assert.assertTrue(shards.contains(leader)); + + for (Metapb.Store store : stores) { + storeService.removeStore(store.getId()); + } + Assert.assertEquals(0, storeService.getStores("").size()); + + + } + + // @Test + public void testSplitPartition() throws PDException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + PartitionService partitionService = new PartitionService(pdConfig, storeService, new ConfigService(pdConfig)); + + storeService.init(partitionService); + partitionService.addInstructionListener(new PartitionInstructionListener(){ + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException { + + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException { + splitPartition.getNewPartitionList().forEach(p->{ + System.out.println("SplitPartition " + p.getId() + " " + p.getStartKey() + "," + p.getEndKey()); + }); + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException{ + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) throws PDException { + + } + }); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for 
(int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); + } + Assert.assertEquals(count, storeService.getStores().size()); + + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + .build(); + Metapb.PartitionShard ptShard = partitionService.getPartitionByCode(graph.getGraphName(), 0); + System.out.println(ptShard.getPartition().getId()); + { + Metapb.Partition pt = ptShard.getPartition(); + System.out.println(pt.getId() + " " + pt.getStartKey() + "," + pt.getEndKey()); + } + + Assert.assertEquals(6, storeService.getShardGroups().size()); + // storeService.splitShardGroups(ptShard.getPartition().getId(), 4); + Assert.assertEquals(9, storeService.getShardGroups().size()); + storeService.getShardGroups().forEach(shardGroup -> { + System.out.println("shardGroup id = " + shardGroup.getId()); + }); + } + + // @Test + public void testPartitionService() throws PDException, ExecutionException, InterruptedException { + StoreNodeService storeService = new StoreNodeService(pdConfig); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + Long.toHexString(stores[i].getId())); + } + Assert.assertEquals(count, storeService.getStores("").size()); + + + PartitionService partitionService = new PartitionService(pdConfig, storeService, new ConfigService(pdConfig)); + + Metapb.Graph graph = 
Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + + .setPartitionCount(10) + .build(); + // 申请分区 + Metapb.PartitionShard[] partitions = new Metapb.PartitionShard[10]; + for (int i = 0; i < partitions.length; i++) { + partitions[i] = partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); + Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount()); + } + System.out.println("分区数量: " + partitionService.getPartitions(graph.getGraphName()).size()); + + int[] caseNo = {0}; //1 测试增加shard, 2 //测试store下线 + + Metapb.Shard leader = null; + int[] finalCaseNo = caseNo; + + partitionService.addInstructionListener(new PartitionInstructionListener(){ + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { + switch (finalCaseNo[0]){ + case 2: + Assert.assertEquals(5, storeService.getShardGroup(partition.getId()).getShardsCount()); + break; + case 3: + storeService.getShardGroup(partition.getId()).getShardsList().forEach(shard -> { + Assert.assertNotEquals(shard.getStoreId(), stores[0].getId()); + }); + break; + } + + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) { + + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) { + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException{ + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + throws PDException { + + } + }); + Metapb.Partition partition = partitions[0].getPartition(); + leader = 
Metapb.Shard.newBuilder(storeService.getShardGroup(partition.getId()).getShardsList().get(0)).build(); + Metapb.Shard finalLeader = leader; + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition partition, Metapb.Partition newPartition) { + + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); + // 测试修改图 + caseNo[0] = 1; partitionService.updateGraph(graph); + for(int i = 0; i< partitions.length ;i++) { + partitions[i] = partitionService.getPartitionShard(graph.getGraphName(), intToByteArray(i)); + Assert.assertEquals(3, storeService.getShardGroup(i).getShardsCount()); + } + + graph = Metapb.Graph.newBuilder(graph) + .setGraphName("defaultGH") + + .setPartitionCount(10) + .build(); + caseNo[0] = 2; partitionService.updateGraph(graph); + + // 测试store离线 + caseNo[0] = 3; partitionService.storeOffline(stores[0]); + + + + Metapb.PartitionStats stats = Metapb.PartitionStats.newBuilder() + .addGraphName(partition.getGraphName()) + .setId(partition.getId()) + .setLeader(Metapb.Shard.newBuilder(leader).setRole(Metapb.ShardRole.Leader)) + .build(); + // 测试leader飘移 + caseNo[0] = 4; partitionService.partitionHeartbeat(stats); + AtomicReference shard = new AtomicReference<>(); + Metapb.PartitionShard ss = partitionService.getPartitionShardById(partition.getGraphName(), partition.getId()); + storeService.getShardList(partition.getId()).forEach(s->{ + if ( s.getRole() == Metapb.ShardRole.Leader){ + Assert.assertNull(shard.get()); + shard.set(s); + } + }); + + Assert.assertEquals(leader.getStoreId(), shard.get().getStoreId()); + + } + + + public static byte[] intToByteArray(int i) { + byte[] result = new byte[4]; + result[0] = (byte)((i >> 24) & 0xFF); + result[1] = (byte)((i >> 16) & 0xFF); + result[2] = (byte)((i >> 8) & 0xFF); + result[3] = (byte)(i & 0xFF); + return result; + } + + // @Test + public void testMergeGraphParams() throws PDException { + 
StoreNodeService storeService = new StoreNodeService(pdConfig); + PartitionService partitionService = new PartitionService(pdConfig, storeService, new ConfigService(pdConfig)); + + Metapb.Graph dfGraph = Metapb.Graph.newBuilder() + + .setPartitionCount(configService.getPartitionCount(0)) + + .build(); + + Metapb.Graph graph1 = Metapb.Graph.newBuilder() + .setGraphName("test") + .setPartitionCount(20) + + .build(); + + Metapb.Graph graph2 = Metapb.Graph.newBuilder() + .setGraphName("test") + .setPartitionCount(7).build(); + Metapb.Graph graph3 = Metapb.Graph.newBuilder() + .setGraphName("test") + .build(); + Metapb.Graph graph4 = Metapb.Graph.newBuilder() + .setGraphName("test") + .build(); + + Metapb.Graph graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph2).build(); + Assert.assertEquals(graph2.getGraphName(), graph.getGraphName()); + + Assert.assertEquals(graph2.getPartitionCount(), graph.getPartitionCount()); + + + graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph3).build(); + Assert.assertEquals(graph3.getGraphName(), graph.getGraphName()); + + Assert.assertEquals(dfGraph.getPartitionCount(), graph.getPartitionCount()); + + + graph = Metapb.Graph.newBuilder(dfGraph).mergeFrom(graph4).build(); + Assert.assertEquals(graph4.getGraphName(), graph.getGraphName()); + + Assert.assertEquals(dfGraph.getPartitionCount(), graph.getPartitionCount()); + + } + + public static void deleteDirectory(File dir) { + try { + FileUtils.deleteDirectory(dir); + } catch (IOException e) { + System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + } + } + + // @Test + public void test(){ + int[] n = new int[3]; + + + if ( ++n[2] > 1){ + System.out.println(n[2]); + } + if ( ++n[2] > 1){ + System.out.println(n[2]); + } + if ( ++n[2] > 1){ + System.out.println(n[2]); + } + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java new file mode 100644 index 
0000000000..21afc4136f --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/UnitTestBase.java @@ -0,0 +1,14 @@ +package org.apache.hugegraph.pd; + +import java.io.File; + +public class UnitTestBase { + public static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java new file mode 100644 index 0000000000..7755d03b85 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -0,0 +1,27 @@ +package org.apache.hugegraph.pd.common; + +// import org.junit.Test; + +import java.nio.charset.StandardCharsets; + +public class PartitionUtilsTest { + + // @Test + public void testHashCode() { + int partCount = 10; + int partSize = PartitionUtils.MAX_VALUE / partCount+1; + int[] counter = new int[partCount]; + for (int i = 0; i < 10000; i++) { + String s = String.format("BATCH-GET-UNIT-%02d", i); + int c = PartitionUtils.calcHashcode(s.getBytes(StandardCharsets.UTF_8)); + + counter[c / partSize]++; + + } + + for (int i = 0; i < counter.length; i++) + System.out.println(i + " " + counter[i]); + } + + +} diff --git a/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java new file mode 100644 index 0000000000..12a26013c7 --- /dev/null +++ b/hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java @@ -0,0 +1,124 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/store/HgKVStoreImplTest.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.core.store; +======== +package org.apache.hugegraph.pd.store; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java + +import java.io.File; +import java.io.IOException; +import java.nio.file.Paths; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.HgKVStoreImpl; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class HgKVStoreImplTest { + + private static final String testPath = "tmp/test"; + private static PDConfig pdConfig; + + @BeforeClass + public static void init() throws IOException { + File testFile = new File(testPath); + if (testFile.exists()) { + FileUtils.deleteDirectory(testFile); + } + FileUtils.forceMkdir(testFile); + pdConfig = new PDConfig() {{ + setDataPath(testPath); + }}; + } + + @Test + public void Test() throws 
PDException { + HgKVStore kvStore = new HgKVStoreImpl(); + kvStore.init(pdConfig); + + { + byte[] key = "hello".getBytes(); + byte[] value = "pd".getBytes(); + kvStore.put(key, value); + } + for (int i = 0; i < 100; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + kvStore.removeByPrefix("k".getBytes()); + Assert.assertEquals(0, kvStore.scanPrefix("k".getBytes()).size()); + + kvStore.close(); + } + + @Test + public void TestSnapshot() throws PDException { + HgKVStore kvStore = new HgKVStoreImpl(); + kvStore.init(pdConfig); + + // put 100 data + for (int i = 0; i < 100; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + // save snapshot + String snapshotPath = Paths.get(testPath, "snapshot").toString(); + kvStore.saveSnapshot(snapshotPath); + + // put another 100 data + for (int i = 100; i < 200; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(200, kvStore.scanPrefix("k".getBytes()).size()); + + // load snapshot + kvStore.loadSnapshot(snapshotPath); + Assert.assertEquals(100, kvStore.scanPrefix("k".getBytes()).size()); + + // put another 100 data + for (int i = 100; i < 200; i++) { + byte[] key = String.format("k%03d", i).getBytes(); + byte[] value = ("value" + i).getBytes(); + kvStore.put(key, value); + } + Assert.assertEquals(200, kvStore.scanPrefix("k".getBytes()).size()); + + kvStore.close(); + } +} diff --git a/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh new file mode 100644 index 0000000000..9285b379c6 --- /dev/null +++ 
b/hg-pd-dist/src/assembly/static/bin/start-hugegraph-pd.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +if [ -z "$GC_OPTION" ];then + GC_OPTION="" +fi +if [ -z "$USER_OPTION" ];then + USER_OPTION="" +fi + +while getopts "g:j:v" arg; do + case ${arg} in + g) GC_OPTION="$OPTARG" ;; + j) USER_OPTION="$OPTARG" ;; + v) VERBOSE="verbose" ;; + ?) echo "USAGE: $0 [-g g1] [-j xxx] [-v]" && exit 1 ;; + esac +done + +function abs_path() { + SOURCE="${BASH_SOURCE[0]}" + while [ -h "$SOURCE" ]; do + DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + SOURCE="$(readlink "$SOURCE")" + [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" + done + echo "$( cd -P "$( dirname "$SOURCE" )" && pwd )" +} + +BIN=$(abs_path) +TOP="$(cd "$BIN"/../ && pwd)" +CONF="$TOP/conf" +LIB="$TOP/lib" +LOGS="$TOP/logs" +OUTPUT=${LOGS}/hugegraph-pd-stdout.log +PID_FILE="$BIN/pid" + +. "$BIN"/util.sh + +mkdir -p ${LOGS} + +# The maximum and minium heap memory that service can use +MAX_MEM=$((32 * 1024)) +MIN_MEM=$((1 * 512)) +EXPECT_JDK_VERSION=11 + +# Change to $BIN's parent +cd ${TOP} + +# Find Java +if [ "$JAVA_HOME" = "" ]; then + JAVA="java" +else + JAVA="$JAVA_HOME/bin/java" +fi + +# check jdk version +JAVA_VERSION=$($JAVA -version 2>&1 | awk 'NR==1{gsub(/"/,""); print $3}' | awk -F'_' '{print $1}') +if [[ $? -ne 0 || $JAVA_VERSION < $EXPECT_JDK_VERSION ]]; then + echo "Please make sure that the JDK is installed and the version >= $EXPECT_JDK_VERSION" >> ${OUTPUT} + exit 1 +fi + +# Set Java options +if [ "$JAVA_OPTIONS" = "" ]; then + XMX=$(calc_xmx $MIN_MEM $MAX_MEM) + if [ $? 
-ne 0 ]; then + echo "Failed to start HugeGraphPDServer, requires at least ${MIN_MEM}m free memory" \ + >> ${OUTPUT} + exit 1 + fi + JAVA_OPTIONS="-Xms${MIN_MEM}m -Xmx${XMX}m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${LOGS} ${USER_OPTION}" + + # Rolling out detailed GC logs + #JAVA_OPTIONS="${JAVA_OPTIONS} -XX:+UseGCLogFileRotation -XX:GCLogFileSize=10M -XX:NumberOfGCLogFiles=3 \ + # -Xloggc:./logs/gc.log -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps" +fi + +# Using G1GC as the default garbage collector (Recommended for large memory machines) +case "$GC_OPTION" in + g1) + echo "Using G1GC as the default garbage collector" + JAVA_OPTIONS="${JAVA_OPTIONS} -XX:+UseG1GC -XX:+ParallelRefProcEnabled \ + -XX:InitiatingHeapOccupancyPercent=50 -XX:G1RSetUpdatingPauseTimePercent=5" + ;; + "") ;; + *) + echo "Unrecognized gc option: '$GC_OPTION', only support 'g1' now" >> ${OUTPUT} + exit 1 +esac + +#if [ "${JMX_EXPORT_PORT}" != "" ] && [ ${JMX_EXPORT_PORT} -ne 0 ] ; then +# JAVA_OPTIONS="${JAVA_OPTIONS} -javaagent:${LIB}/jmx_prometheus_javaagent-0.16.1.jar=${JMX_EXPORT_PORT}:${CONF}/jmx_exporter.yml" +#fi +if [ $(ps -ef|grep -v grep| grep java|grep -cE ${CONF}) -ne 0 ]; then + echo "HugeGraphPDServer is already running..." + exit 0 +fi +echo "Starting HugeGraphPDServer..." + +# Turn on security check +exec ${JAVA} ${JAVA_OPTIONS} -jar -Dspring.config.location=${CONF}/application.yml \ + ${LIB}/hugegraph-pd-4.0.0-SNAPSHOT.jar >> ${OUTPUT} 2>&1 & + +PID="$!" 
+# Write pid to file +echo "$PID" > "$PID_FILE" +echo "[+pid] $PID" diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml b/hg-pd-dist/src/assembly/static/conf/application.yml new file mode 100644 index 0000000000..810f49a67a --- /dev/null +++ b/hg-pd-dist/src/assembly/static/conf/application.yml @@ -0,0 +1,62 @@ +spring: + application: + name: hugegraph-pd + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + enabled-by-default: false + web: + exposure: + include: "prometheus,health" + endpoint: + prometheus.enabled: true + health.enabled: true + +logging: + config: 'file:./conf/log4j2.xml' +license: + verify-path: ./conf/verify-license.json + license-path: ./conf/hugegraph.license +grpc: + port: 8686 + # grpc的服务地址, 部署时需要改为本地实际IPv4地址。 + host: 127.0.0.1 + +server: + # rest服务端口号 + port : 8620 + +pd: + # 存储路径 + data-path: ./pd_data + # 自动扩容的检查周期,定时检查每个Store的分区数量,自动进行分区数量平衡 + patrol-interval: 1800 + # 初始store列表,grpc IP:grpc port, 在列表内的store自动激活 + initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 +raft: + # 本机raft服务地址 + address: 127.0.0.1:8610 + # PD集群服务地址 + peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612 + +store: + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 172800 + # 是否开启store监控数据存储 + monitor_data_enabled: false + # 监控数据的间隔,minute(默认), hour, second + # default: 1 min * 1 day = 1440 + monitor_data_interval: 1 minute + # 监控数据的保留时间 1 天; day, month, year + monitor_data_retention: 1 day + +partition: + # 默认每个分区副本数 + default-shard-count: 3 + # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 12 + diff --git a/hg-pd-dist/src/assembly/static/conf/application.yml.template b/hg-pd-dist/src/assembly/static/conf/application.yml.template new file mode 100644 index 0000000000..42bdf74313 --- /dev/null +++ b/hg-pd-dist/src/assembly/static/conf/application.yml.template @@ -0,0 +1,58 @@ +spring: + application: + name: hugegraph-pd +logging: + config: 
file:./conf/log4j2.xml + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + enabled-by-default: false + web: + exposure: + include: "prometheus,health" + endpoint: + prometheus.enabled: true + health.enabled: true + +grpc: + port: $GRPC_PORT$ + # grpc的服务地址, + #注意:部署时需要改为本地实际IPv4地址。 + host: $GRPC_HOST$ + netty-server: + max-inbound-message-size: 100MB + +server: + port : $SERVER_PORT$ + +pd: + # 集群ID,区分不同的PD集群 + + patrol-interval: 2147483647 + data-path: $PD_DATA_PATH$ + +raft: + address: $RAFT_ADDRESS$ + # raft集群 + peers-list: $RAFT_PEERS_LIST$ + # 快照生成时间间隔,单位秒 + snapshotInterval: 300 + metrics: true +store: + # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒 + keepAlive-timeout: 60 + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 1800 +partition: + # 默认分区总数 + default-total-count: 30 + # 默认每个分区副本数 + default-shard-count: 3 + +discovery: + #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + heartbeat-try-count: 3 diff --git a/hg-pd-dist/src/assembly/static/conf/hugegraph.license b/hg-pd-dist/src/assembly/static/conf/hugegraph.license new file mode 100644 index 0000000000000000000000000000000000000000..3cc0c344b4a6c13836715fb5ba5b4b14749a24a6 GIT binary patch literal 848 zcmV-W1F!sHhn&I!I)GnwHK;#FaR|4FN4%n_lEcO%-pD`UE$4 z;}s8Uc0vW;3h8Db>Ut5-RbDLDFg4sj+R1}DC@hY_76QKi-1E@=2sXEFS4T*78c^SV z;JwjN3~NucPY5@MO0Dptt3WmuL zvLdjwz)(1zbffoKZ<*L6@x0=h@f}gk&EsRw<1VG!*N5-I@TBGOGeHlvFY;EY z92byd^2&`ZhdrK?UR^s6Nsut(Lm)~**LQi*%)Hdf{yL!^$DUl92wViz_}K7xBsv8m zr_d)pw5k#!C3!={-c16PI$O)?0fdkBJ$mg1d&zY--|5eJYoIou0N_i7R`8)*x*wmZ zv-vnKj!vCg@gJ%q-sId;{wH+RI298*As9_V>gPOHR!-(HolCt-)C{?yG--Q>n|Fn> z&T#y@sK^bQOxk!SJXC4rX5+`-KBil3N+ labels = 6; +} +message Query { + string appName = 1; + string version = 2; + map labels = 3; +} +message LeaseInfo { + int64 registrationTs = 1; + int64 lastHeartbeatTs = 2; + int64 serverUpTs = 3; +} +message RegisterInfo { + NodeInfo nodeInfo = 1; + LeaseInfo leaseInfo = 2 ; + RegisterType type = 3 ; + ResponseHeader header = 4; +} +enum 
RegisterType { + Register = 0; + Heartbeat = 1; + Dislodge = 2; +} +//message Condition{ +// string label = 1; +//} +//message Conditions{ +// string label = 1; +// string value = 2; +//} +message NodeInfos{ + repeated NodeInfo info = 1; +} \ No newline at end of file diff --git a/hg-pd-grpc/src/main/proto/metaTask.proto b/hg-pd-grpc/src/main/proto/metaTask.proto new file mode 100644 index 0000000000..420e63474b --- /dev/null +++ b/hg-pd-grpc/src/main/proto/metaTask.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package metaTask; +import "metapb.proto"; +import "pulse.proto"; +option java_package = "org.apache.hugegraph.pd.grpc"; + +enum TaskType { + Unknown = 0; + Split_Partition = 1; + Change_Shard = 2; + Move_Partition = 3; + Clean_Partition = 4; + Change_KeyRange = 5; + Build_Index = 6; + Backup_Graph = 7; +} + +// 一条任务信息 +message Task { + uint64 id = 1; + TaskType type = 2; + TaskState state = 3; + int64 start_timestamp = 4; + metapb.Partition partition = 5; + string message = 6; + //每个shard执行的任务状态 + repeated ShardTaskState shardState = 7; + ChangeShard changeShard = 9; + SplitPartition splitPartition = 10; + MovePartition movePartition = 11; + CleanPartition cleanPartition = 12; + PartitionKeyRange partitionKeyRange = 13; + metapb.BuildIndex buildIndex = 14; +} + +enum TaskState{ + Task_Unknown = 0; + Task_Ready = 1; //任务就绪 + Task_Doing = 2; //执行中 + Task_Done = 3; //完成 + Task_Exit = 4; //退出 + Task_Stop = 10; + Task_Success = 11; + Task_Failure = 12; +} + +message ShardTaskState{ + uint64 store_id = 1; + TaskState state = 2; +} + diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java new file mode 100644 index 0000000000..901c1db474 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/HugePDServer.java @@ -0,0 +1,25 @@ +package org.apache.hugegraph.pd.boot; + +import org.springframework.boot.SpringApplication; +import 
org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.context.annotation.ComponentScan; + +import com.alipay.remoting.util.StringUtils; + +/** + * PD服务启动类 + */ +@ComponentScan(basePackages = {"org.apache.hugegraph.pd"}) +@SpringBootApplication +public class HugePDServer { + public static void main(String[] args) { + String logPath = System.getProperty("logging.path"); + if (StringUtils.isBlank(logPath)) { + System.setProperty("logging.path", "logs"); + System.setProperty("com.alipay.remoting.client.log.level", "error"); + } + Runtime.getRuntime().addShutdownHook(new ShutdownHook(Thread.currentThread())); + SpringApplication.run(HugePDServer.class); + System.out.println("Hugegraph-pd started."); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/ShutdownHook.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/ShutdownHook.java new file mode 100644 index 0000000000..d7e57e1bea --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/boot/ShutdownHook.java @@ -0,0 +1,74 @@ +package org.apache.hugegraph.pd.boot; + +import java.util.concurrent.ThreadPoolExecutor; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hugegraph.pd.consts.PoolNames; +import org.apache.hugegraph.pd.service.MetadataService; + +public class ShutdownHook extends Thread { + + private static Logger log = LoggerFactory.getLogger(ShutdownHook.class); + private static String msg = "there are still uninterruptible jobs that have not been completed and" + + " will wait for them to complete"; + private Thread main; + + public ShutdownHook(Thread main) { + super(); + this.main = main; + setName(PoolNames.SHUTDOWN); + } + + @Override + public void run() { + log.info("shutdown signal received"); + main.interrupt(); + shutdown(); + try { + main.join(); + } catch (InterruptedException e) { + } + log.info("shutdown completed"); + } + + private void shutdown() { + checkUninterruptibleJobs(); + 
} + + private void checkUninterruptibleJobs() { + ThreadPoolExecutor jobs = MetadataService.getUninterruptibleJobs(); + try { + if (jobs != null) { + long lastPrint = System.currentTimeMillis() - 5000; + log.info("check for ongoing background jobs that cannot be interrupted, active:{}, queue:{}.", + jobs.getActiveCount(), jobs.getQueue().size()); + while (jobs.getActiveCount() != 0 || jobs.getQueue().size() != 0) { + synchronized (ShutdownHook.class) { + if (System.currentTimeMillis() - lastPrint > 5000) { + log.warn(msg); + lastPrint = System.currentTimeMillis(); + } + try { + ShutdownHook.class.wait(200); + } catch (InterruptedException e) { + log.error("close jobs with error:", e); + } + } + } + log.info("all ongoing background jobs have been completed and the shutdown will continue"); + } + + } catch (Exception e) { + log.error("close jobs with error:", e); + } + try { + if (jobs != null) { + jobs.shutdownNow(); + } + } catch (Exception e) { + log.error("close jobs with error:", e); + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/CommonLicenseManager.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/CommonLicenseManager.java new file mode 100644 index 0000000000..91fcada88a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/CommonLicenseManager.java @@ -0,0 +1,106 @@ +package org.apache.hugegraph.pd.license; + +import java.beans.XMLDecoder; +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; + +import de.schlichtherle.license.LicenseContent; +import de.schlichtherle.license.LicenseContentException; +import de.schlichtherle.license.LicenseManager; +import de.schlichtherle.license.LicenseNotary; +import de.schlichtherle.license.LicenseParam; +import de.schlichtherle.license.NoLicenseInstalledException; +import de.schlichtherle.xml.GenericCertificate; +import lombok.extern.slf4j.Slf4j; + 
+@Slf4j +public class CommonLicenseManager extends LicenseManager { + private static final String CHARSET = "UTF-8"; + private static final int BUF_SIZE = 8 * 1024; + + public CommonLicenseManager(LicenseParam param) { + super(param); + } + + @Override + protected synchronized byte[] create(LicenseContent content, + LicenseNotary notary) + throws Exception { + super.initialize(content); + this.validateCreate(content); + GenericCertificate certificate = notary.sign(content); + return super.getPrivacyGuard().cert2key(certificate); + } + + @Override + protected synchronized LicenseContent install(byte[] key, + LicenseNotary notary) + throws Exception { + GenericCertificate certificate = super.getPrivacyGuard().key2cert(key); + notary.verify(certificate); + String encodedText = certificate.getEncoded(); + LicenseContent content = (LicenseContent) this.load(encodedText); + this.validate(content); + super.setLicenseKey(key); + super.setCertificate(certificate); + return content; + } + + @Override + protected synchronized LicenseContent verify(LicenseNotary notary) + throws Exception { + // Load license key from preferences + byte[] key = super.getLicenseKey(); + if (key == null) { + String subject = super.getLicenseParam().getSubject(); + throw new NoLicenseInstalledException(subject); + } + + GenericCertificate certificate = super.getPrivacyGuard().key2cert(key); + notary.verify(certificate); + String encodedText = certificate.getEncoded(); + LicenseContent content = (LicenseContent) this.load(encodedText); + this.validate(content); + super.setCertificate(certificate); + return content; + } + + @Override + protected synchronized void validate(LicenseContent content) + throws LicenseContentException { + // Call super validate, expected to be overwritten + super.validate(content); + } + + protected synchronized void validateCreate(LicenseContent content) + throws LicenseContentException { + // Just call super validate is ok + super.validate(content); + } + + private 
Object load(String text) throws Exception { + InputStream bis = null; + XMLDecoder decoder = null; + try { + bis = new ByteArrayInputStream(text.getBytes(CHARSET)); + decoder = new XMLDecoder(new BufferedInputStream(bis, BUF_SIZE)); + return decoder.readObject(); + } catch (UnsupportedEncodingException e) { + throw new LicenseContentException(String.format( + "Unsupported charset: %s", CHARSET)); + } finally { + if (decoder != null) { + decoder.close(); + } + try { + if (bis != null) { + bis.close(); + } + } catch (Exception e) { + log.error("load file {} error: ", text, e); + } + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/ExtraParam.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/ExtraParam.java new file mode 100644 index 0000000000..2cd621f28d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/ExtraParam.java @@ -0,0 +1,114 @@ +package org.apache.hugegraph.pd.license; + +import com.fasterxml.jackson.annotation.JsonProperty; + +public class ExtraParam { + + @JsonProperty("username") + private String username; + + @JsonProperty("license_type") + private String licenseType; + + @JsonProperty("id") + private String id; + + @JsonProperty("version") + private String version; + + @JsonProperty("graphs") + private int graphs; + + @JsonProperty("ip") + private String ip; + + @JsonProperty("mac") + private String mac; + + @JsonProperty("cpus") + private int cpus; + + // The unit is MB + @JsonProperty("ram") + private int ram; + + @JsonProperty("threads") + private int threads; + + // The unit is MB + @JsonProperty("memory") + private int memory; + + @JsonProperty("nodes") + private int nodes; + + // The unit is MB + @JsonProperty("data_size") + private long dataSize; + + @JsonProperty("vertices") + private long vertices; + + @JsonProperty("edges") + private long edges; + + public String username() { + return this.username; + } + + public String licenseType() { + return this.licenseType; 
+ } + + public String id() { + return this.id; + } + + public String version() { + return this.version; + } + + public int graphs() { + return this.graphs; + } + + public String ip() { + return this.ip; + } + + public String mac() { + return this.mac; + } + + public int cpus() { + return this.cpus; + } + + public int ram() { + return this.ram; + } + + public int threads() { + return this.threads; + } + + public int memory() { + return this.memory; + } + + public int nodes() { + return this.nodes; + } + + public long dataSize() { + return this.dataSize; + } + + public long vertices() { + return this.vertices; + } + + public long edges() { + return this.edges; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java new file mode 100644 index 0000000000..dedfacdc88 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifierService.java @@ -0,0 +1,379 @@ +/* + * Copyright 2017 HugeGraph Authors + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hugegraph.pd.license; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.charset.Charset; +import java.text.SimpleDateFormat; +import java.time.Duration; +import java.time.Instant; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.prefs.Preferences; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.license.MachineInfo; +import org.apache.hugegraph.pd.service.ServiceGrpc; +import org.springframework.stereotype.Service; +import org.springframework.util.Base64Utils; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.gson.Gson; +import com.google.gson.internal.LinkedTreeMap; + +import de.schlichtherle.license.CipherParam; +import de.schlichtherle.license.DefaultCipherParam; +import de.schlichtherle.license.DefaultKeyStoreParam; +import de.schlichtherle.license.DefaultLicenseParam; +import de.schlichtherle.license.KeyStoreParam; +import de.schlichtherle.license.LicenseContent; +import de.schlichtherle.license.LicenseParam; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Service +@Slf4j +public class LicenseVerifierService implements ServiceGrpc { + + private PDConfig pdConfig; + private static final Duration CHECK_INTERVAL = Duration.ofMinutes(10); + private volatile Instant lastCheckTime = Instant.now(); + // private final 
LicenseVerifyParam verifyParam; + private LicenseVerifyManager manager; + private static LicenseContent content; + private static KvService kvService; + private static String contentKey = "contentKey"; + private static Gson mapper = new Gson(); + private final MachineInfo machineInfo; + private static volatile boolean installed = false; + + public LicenseVerifierService(PDConfig pdConfig) { + this.pdConfig = pdConfig; + machineInfo = new MachineInfo(); + kvService = new KvService(pdConfig); + // verifyParam = initLicense(pdConfig); + } + + public LicenseVerifyParam init() { + LicenseVerifyParam verifyParam = null; + if (!installed) { + synchronized (LicenseVerifierService.class) { + if (!installed) { + verifyParam = buildVerifyParam(pdConfig.getVerifyPath()); + log.info("get license param: {}", pdConfig.getVerifyPath()); + if (verifyParam != null) { + LicenseParam licenseParam = this.initLicenseParam(verifyParam); + this.manager = new LicenseVerifyManager(licenseParam); + // this.install("d01e1814cd9edb01a05671bebf3919cc"); + try { + // this.verifyPublicCert(md5); + File licenseFile = new File(pdConfig.getLicensePath()); + if (!licenseFile.exists()) { + log.warn("invalid parameter:license-path"); + return null; + } else { + log.info("get license file....{}", licenseFile.getAbsolutePath()); + } + this.manager.uninstall(); + content = this.manager.install(licenseFile); + ExtraParam param = LicenseVerifyManager.getExtraParams(content); + content.setExtra(param); + this.checkIpAndMac(param); + // 获取有效期,并设置过期时间,通知leader,将content保存到... 
+ Date notAfter = content.getNotAfter(); + long ttl = notAfter.getTime() - System.currentTimeMillis(); + final TTLResponse[] info = {null}; + if (!isLeader()) { + while (RaftEngine.getInstance().getLeader() == null) { + this.wait(200); + } + while (RaftEngine.getInstance().getLeader() != null) { + CountDownLatch latch = new CountDownLatch(1); + TTLRequest request = TTLRequest.newBuilder().setKey(contentKey).setValue( + mapper.toJson(content, LicenseContent.class)).setTtl(ttl).build(); + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(TTLResponse value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + redirectToLeader(KvServiceGrpc.getPutTTLMethod(), request, observer); + latch.await(); + if (info[0] == null) { + while (RaftEngine.getInstance().getLeader() == null) { + log.info("wait for leader to put the license content......"); + this.wait(200); + } + } else { + Errors error = info[0].getHeader().getError(); + if (!error.getType().equals(ErrorType.OK)) { + throw new Exception(error.getMessage()); + } + break; + } + } + + } else { + kvService.put(contentKey, mapper.toJson(content, LicenseContent.class), ttl); + } + installed = true; + log.info("The license is successfully installed, valid for {} - {}", + content.getNotBefore(), notAfter); + } catch (Exception e) { + log.error("Failed to install license", e); + throw new PDRuntimeException(ErrorType.LICENSE_ERROR_VALUE, + "Failed to install license, ", e); + } + } + } + } + } + return verifyParam; + } + + // public static LicenseVerifierService instance() { + // if (INSTANCE == null) { + // synchronized (LicenseVerifierService.class) { + // if (INSTANCE == null) { + // INSTANCE = new LicenseVerifierService(); + // } + // } + // } + // return INSTANCE; + // } + + // public void verifyIfNeeded() { + // Instant now = Instant.now(); + // 
Duration interval = Duration.between(this.lastCheckTime, now); + // if (!interval.minus(CHECK_INTERVAL).isNegative()) { + // this.verify(); + // this.lastCheckTime = now; + // } + // } + + public synchronized void install(String md5) { + + } + SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + public HashMap getContext() throws Exception { + try { + String value = kvService.get(contentKey); + if (StringUtils.isEmpty(value)) { + throw new Exception("can not find license content from storage"); + } + LicenseContent content = mapper.fromJson(value, LicenseContent.class); + Date notAfter = content.getNotAfter(); + Date notBefore = content.getNotBefore(); + Date issued = content.getIssued(); + // long currentTimeMillis = System.currentTimeMillis(); + // long diff = notAfter - currentTimeMillis; + // boolean expired = diff <= 0; + HashMap result = mapper.fromJson(value, HashMap.class); + result.put("current", formatter.format(new Date())); + result.put("notAfter", formatter.format(notAfter)); + result.put("issued", formatter.format(issued)); + result.put("notBefore", formatter.format(notBefore)); + return result; + } catch (Exception e) { + throw new Exception("can not find license content from storage:" + e.getMessage()); + } + } + + public LicenseContent verify(int cores, int nodeCount) { + try { + String value = kvService.get(contentKey); + if (StringUtils.isEmpty(value)) { + throw new Exception("can not find license content from storage"); + } + LicenseContent content = mapper.fromJson(value, LicenseContent.class); + LinkedTreeMap param = (LinkedTreeMap) content.getExtra(); + int licCpus = ((Double) param.get("cpus")).intValue(); + int licNodes = ((Double) param.get("nodes")).intValue(); + if (param != null) { + if (licCpus != -1) { + // licCpus为 -1时,表示不限制cpu核数 + if (cores <= 0 || cores > licCpus) { + String msg = + String.format("无效的cpu核数: %s,授权数: %s", cores, licCpus); + throw new PDRuntimeException( + ErrorType.LICENSE_VERIFY_ERROR_VALUE, 
msg); + } + } + + if (licNodes != -1) { + // licNodes为 -1时,表示不限制服务节点数目 + if (nodeCount > licNodes) { + String msg = String.format("无效的节点个数: %s,授权数: %s", nodeCount, licNodes); + throw new PDRuntimeException( + ErrorType.LICENSE_VERIFY_ERROR_VALUE, msg); + } + } + } + return content; + } catch (Exception e) { + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, + "授权信息校验异常," + e.getMessage()); + } + } + + // private void verifyPublicCert(String expectMD5) { + // String path = this.verifyParam.publicKeyPath(); + // try (InputStream is = LicenseVerifierService.class.getResourceAsStream(path)) { + // String actualMD5 = DigestUtils.md5Hex(is); + // if (!actualMD5.equals(expectMD5)) { + // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Invalid public cert"); + // } + // } catch (IOException e) { + // log.error("Failed to read public cert", e); + // throw new PDRuntimeException(PDRuntimeException.LICENSE_ERROR, "Failed to read public cert", e); + // } + // } + + private LicenseParam initLicenseParam(LicenseVerifyParam param) { + Preferences preferences = Preferences.userNodeForPackage(LicenseVerifierService.class); + CipherParam cipherParam = new DefaultCipherParam(param.storePassword()); + KeyStoreParam keyStoreParam = new DefaultKeyStoreParam(LicenseVerifierService.class, + param.publicKeyPath(), param.publicAlias(), + param.storePassword(), null); + return new DefaultLicenseParam(param.subject(), preferences, keyStoreParam, cipherParam); + } + + private static LicenseVerifyParam buildVerifyParam(String path) { + // NOTE: can't use JsonUtil due to it bind tinkerpop jackson + try { + ObjectMapper mapper = new ObjectMapper(); + File licenseParamFile = new File(path); + if (!licenseParamFile.exists()) { + log.warn("failed to get file:{}", path); + return null; + } + return mapper.readValue(licenseParamFile, LicenseVerifyParam.class); + } catch (IOException e) { + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, + 
String.format("Failed to read json stream to %s", + LicenseVerifyParam.class)); + } + } + + public String getIpAndMac() { + List actualIps = this.machineInfo.getIpAddress(); + String host = pdConfig.getHost(); + String licenseHost = host; + if (!actualIps.contains(host)) { + licenseHost = actualIps.get(0); + } + try { + String mac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(licenseHost)); + HashMap ipAndMac = new HashMap<>(); + ipAndMac.put("ip", licenseHost); + ipAndMac.put("mac", mac); + String json = new Gson().toJson(ipAndMac); + String encode = Base64Utils.encodeToString(json.getBytes(Charset.defaultCharset())); + return encode; + } catch (Exception e) { + throw new PDRuntimeException(ErrorType.LICENSE_ERROR_VALUE, + String.format("Failed to get ip and mac for %s", + e.getMessage())); + } + } + + private void checkIpAndMac(ExtraParam param) { + String expectIp = param.ip(); + boolean matched = false; + List actualIps = null; + if (StringUtils.isEmpty(expectIp)) { + matched = true; + } else { + actualIps = this.machineInfo.getIpAddress(); + for (String actualIp : actualIps) { + if (actualIp.equalsIgnoreCase(expectIp)) { + matched = true; + break; + } + } + } + if (!matched) { + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( + "The server's ip '%s' doesn't match the authorized '%s'", actualIps, expectIp)); + } + String expectMac = param.mac(); + if (StringUtils.isEmpty(expectMac)) { + return; + } + // The mac must be not empty here + if (!StringUtils.isEmpty(expectIp)) { + String actualMac; + try { + actualMac = this.machineInfo.getMacByInetAddress(InetAddress.getByName(expectIp)); + } catch (UnknownHostException e) { + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, + String.format("Failed to get mac address for ip '%s'", + expectIp)); + } + String expectFormatMac = expectMac.replaceAll(":", "-"); + String actualFormatMac = actualMac.replaceAll(":", "-"); + if 
(!actualFormatMac.equalsIgnoreCase(expectFormatMac)) { + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( + "The server's mac '%s' doesn't match the authorized '%s'", actualMac, expectMac)); + } + } else { + String expectFormatMac = expectMac.replaceAll(":", "-"); + List actualMacs = this.machineInfo.getMacAddress(); + matched = false; + for (String actualMac : actualMacs) { + String actualFormatMac = actualMac.replaceAll(":", "-"); + if (actualFormatMac.equalsIgnoreCase(expectFormatMac)) { + matched = true; + break; + } + } + if (!matched) { + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, String.format( + "The server's macs %s don't match the authorized '%s'", actualMacs, expectMac)); + } + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java new file mode 100644 index 0000000000..37253d9d5c --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyManager.java @@ -0,0 +1,56 @@ +package org.apache.hugegraph.pd.license; + +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import de.schlichtherle.license.LicenseContent; +import de.schlichtherle.license.LicenseContentException; +import de.schlichtherle.license.LicenseParam; +import lombok.extern.slf4j.Slf4j; + +import java.io.IOException; +import java.util.List; + +@Slf4j +public class LicenseVerifyManager extends CommonLicenseManager { + + private static final ObjectMapper MAPPER = new ObjectMapper(); + private static final int NO_LIMIT = -1; + + public LicenseVerifyManager(LicenseParam param) { + super(param); + } + + @Override + protected synchronized void 
validate(LicenseContent content) throws LicenseContentException { + // Call super validate firstly to verify the common license parameters + try { + super.validate(content); + } catch (LicenseContentException e) { + // log.error("Failed to verify license", e); + throw e; + } + // Verify the customized license parameters. + getExtraParams(content); + } + + public static ExtraParam getExtraParams(LicenseContent content) { + List params; + try { + TypeReference> type; + type = new TypeReference<>() { + }; + params = MAPPER.readValue((String) content.getExtra(), type); + if (params != null && params.size() > 0) { + return params.get(0); + } + } catch (IOException e) { + log.error("Failed to read extra params", e); + throw new PDRuntimeException(ErrorType.LICENSE_VERIFY_ERROR_VALUE, + "Failed to read extra params", e); + } + return null; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyParam.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyParam.java new file mode 100644 index 0000000000..37fdbfbdc8 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/license/LicenseVerifyParam.java @@ -0,0 +1,43 @@ +package org.apache.hugegraph.pd.license; + +import com.fasterxml.jackson.annotation.JsonAlias; +import com.fasterxml.jackson.annotation.JsonProperty; + +public class LicenseVerifyParam { + + @JsonProperty("subject") + private String subject; + + @JsonProperty("public_alias") + private String publicAlias; + + @JsonAlias("store_ticket") + @JsonProperty("store_password") + private String storePassword; + + @JsonProperty("publickey_path") + private String publicKeyPath; + + @JsonProperty("license_path") + private String licensePath; + + public String subject() { + return this.subject; + } + + public String publicAlias() { + return this.publicAlias; + } + + public String storePassword() { + return this.storePassword; + } + + public String licensePath() { + return this.licensePath; + } + 
+ public String publicKeyPath() { + return this.publicKeyPath; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java new file mode 100644 index 0000000000..0bbba95f1e --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/MetricsConfig.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.metrics; + +import io.micrometer.core.instrument.MeterRegistry; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +/** + * @author lynn.bond@hotmail.com on 2022/01/05 + */ +@Configuration +public class MetricsConfig { + @Autowired + private PDMetrics metrics; + + @Bean + public MeterRegistryCustomizer metricsCommonTags() { + return (registry) -> registry.config().commonTags("hg", "pd"); + } + + @Bean + public MeterRegistryCustomizer registerMeters() { + return (registry) -> { + metrics.init(registry); + }; + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java new file mode 100644 index 0000000000..a30771b803 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/metrics/PDMetrics.java @@ -0,0 +1,200 @@ +package org.apache.hugegraph.pd.metrics; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; +import java.util.function.ToDoubleFunction; + +import org.apache.commons.collections4.MapUtils; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hugegraph.pd.service.PDRestService; +import 
org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; +import org.apache.hugegraph.pd.model.GraphStatistics; + +import io.micrometer.core.instrument.Counter; +import io.micrometer.core.instrument.Gauge; +import io.micrometer.core.instrument.MeterRegistry; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/1/5 + */ +@Component +@Slf4j +public final class PDMetrics { + + public final static String PREFIX = "hg"; + private static AtomicLong graphs = new AtomicLong(0); + private static Map lastTerms = new ConcurrentHashMap(); + private static Map leaderCountGauges = new ConcurrentHashMap(); + private static Map leaderCounts = new ConcurrentHashMap(); + @Autowired + PDRestService pdRestService; + @Autowired + private PDService pdService; + private MeterRegistry registry; + private Map> lasts = new ConcurrentHashMap(); + private int interval = 120 * 1000; + private volatile int avgLeaderCount = 0; + private Function gaugeFunction = k -> Gauge.builder(PREFIX + ".store.leader.count", + () -> leaderCounts.getOrDefault(k, 0)) + .description("leader count of node") + .tag("address", k) + .register(this.registry); + + public synchronized void init(MeterRegistry meterRegistry) { + if (registry == null) { + registry = meterRegistry; + registerMeters(); + } + } + + private void registerMeters() { + Gauge.builder(PREFIX + ".up", () -> 1).register(registry); + Gauge.builder(PREFIX + ".graphs", () -> updateGraphs()) + .description("Number of graphs registered in PD") + .register(registry); + Gauge.builder(PREFIX + ".stores", () -> updateStores()) + .description("Number of stores registered in PD") + .register(registry); + Gauge.builder(PREFIX + 
".terms", () -> setTerms()) + .description("term of partitions in PD") + .register(registry); + Gauge.builder(PREFIX + ".store.leader.averageCount", () -> avgLeaderCount) + .description("term of partitions in PD") + .register(registry); + } + + private long updateGraphs() { + long buf = getGraphs(); + if (buf != graphs.get()) { + graphs.set(buf); + registerGraphMetrics(); + } + return buf; + } + + private long updateStores() { + return getStores(); + } + + private long getGraphs() { + return getGraphMetas().size(); + } + + private long getStores() { + try { + return this.pdService.getStoreNodeService().getStores(null).size(); + } catch (PDException e) { + log.error(e.getMessage(), e); + e.printStackTrace(); + } + return 0; + } + + private long setTerms() { + List groups = null; + try { + groups = pdRestService.getShardGroups(); + StoreNodeService nodeService = pdService.getStoreNodeService(); + List activeStores = nodeService.getActiveStores(); + HashMap stores = new HashMap<>(activeStores.size()); + for (Metapb.Store s : activeStores) { + stores.put(s.getId(), s); + leaderCountGauges.computeIfAbsent(s.getAddress(), gaugeFunction); + } + HashMap leaders = new HashMap<>(); + if (!MapUtils.isEmpty(stores)) { + avgLeaderCount = (int) Math.ceil((double) groups.size() / (double) stores.size()); + } + for (ShardGroup g : groups) { + String id = String.valueOf(g.getId()); + ShardGroup group = nodeService.getShardGroup(g.getId()); + long version = group.getVersion(); + Counter lastTerm = lastTerms.get(id); + if (lastTerm == null) { + lastTerm = Counter.builder(PREFIX + ".partition.terms") + .description("term of partition") + .tag("id", id) + .register(this.registry); + lastTerm.increment(version); + lastTerms.put(id, lastTerm); + } else { + lastTerm.increment(version - lastTerm.count()); + } + List shards = g.getShardsList(); + for (Metapb.Shard shard : shards) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + leaders.put(shard.getStoreId(), 
leaders.getOrDefault(shard.getStoreId(), 0) + 1); + break; + } + } + } + leaderCounts.clear(); + for (Map.Entry entry : leaders.entrySet()) { + Long storeId = entry.getKey(); + String address = stores.get(storeId).getAddress(); + leaderCounts.put(address, entry.getValue()); + } + } catch (Exception e) { + log.info("get partition term with error :", e); + } + if (groups == null) { + return 0; + } else { + return groups.size(); + } + } + + private List getGraphMetas() { + try { + return this.pdService.getPartitionService().getGraphs(); + } catch (PDException e) { + log.error(e.getMessage(), e); + } + return Collections.EMPTY_LIST; + } + + private void registerGraphMetrics() { + this.getGraphMetas().forEach(meta -> { + Gauge.builder(PREFIX + ".partitions", this.pdService.getPartitionService(), + e -> e.getPartitions(meta.getGraphName()).size()) + .description("Number of partitions assigned to a graph") + .tag("graph", meta.getGraphName()) + .register(this.registry); + ToDoubleFunction getGraphSize = e -> { + try { + String graphName = e.getGraphName(); + Pair last = lasts.get(graphName); + Long lastTime; + if (last == null || (lastTime = last.getLeft()) == null || + System.currentTimeMillis() - lastTime >= interval) { + long dataSize = new GraphStatistics(e, pdRestService, pdService).getDataSize(); + lasts.put(graphName, Pair.of(System.currentTimeMillis(), dataSize)); + return dataSize; + } else { + return last.getRight(); + } + } catch (PDException ex) { + log.error("get graph size with error", e); + } + return 0; + }; + Gauge.builder(PREFIX + ".graph.size", meta, getGraphSize) + .description("data size of graph") + .tag("graph", meta.getGraphName()) + .register(this.registry); + }); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java new file mode 100644 index 0000000000..eb1ca0005e --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/DemoModel.java @@ -0,0 +1,55 @@ +package org.apache.hugegraph.pd.model; + +import java.util.Objects; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/1 + */ +public class DemoModel { + private int status; + private String text; + + public DemoModel(int status, String text) { + this.status = status; + this.text = text; + } + + public int getStatus() { + return status; + } + + public DemoModel setStatus(int status) { + this.status = status; + return this; + } + + public String getText() { + return text; + } + + public DemoModel setText(String text) { + this.text = text; + return this; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DemoModel that = (DemoModel) o; + return status == that.status && Objects.equals(text, that.text); + } + + @Override + public int hashCode() { + return Objects.hash(status, text); + } + + @Override + public String toString() { + return "HgNodeStatus{" + + "status=" + status + + ", text='" + text + '\'' + + '}'; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java new file mode 100644 index 0000000000..60340beb57 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphRestRequest.java @@ -0,0 +1,9 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class GraphRestRequest { + private int partitionCount; + private int shardCount; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java new file mode 100644 index 0000000000..92b48982ea --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphSpaceRestRequest.java @@ -0,0 +1,8 @@ +package 
org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class GraphSpaceRestRequest { + private Long storageLimit; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphStatistics.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphStatistics.java new file mode 100644 index 0000000000..9fc3ca2fbc --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/GraphStatistics.java @@ -0,0 +1,78 @@ +package org.apache.hugegraph.pd.model; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; + +import lombok.AccessLevel; +import lombok.Data; +import lombok.Getter; +import lombok.Setter; + +/** + * @author zhangyingjie + * @date 2023/7/25 + **/ +@Data +public class GraphStatistics { + + @Getter(AccessLevel.NONE) + @Setter(AccessLevel.NONE) + private transient PDRestService pdRestService; + // 图统计信息 + String graphName; + long partitionCount; + String state; + int storeGroupId; + List partitions; + long dataSize; + int nodeCount; + int edgeCount; + long keyCount; + + public GraphStatistics(Metapb.Graph graph, PDRestService restService, PDService pdService) throws PDException { + this.pdRestService = restService; + if (graph == null) { + return; + } + Map partition2DataSize = new HashMap<>(); + graphName = graph.getGraphName(); + partitionCount = graph.getPartitionCount(); + state = String.valueOf(graph.getState()); + storeGroupId = graph.getStoreGroupId(); + // 数据量及key的数量 + List stores = pdRestService.getStores(graphName); + for (Metapb.Store store : stores) { + List graphStatsList = store.getStats().getGraphStatsList(); + for (Metapb.GraphStats graphStats : graphStatsList) { + if ((graphName.equals(graphStats.getGraphName())) + && 
(Metapb.ShardRole.Leader.equals(graphStats.getRole()))) { + keyCount += graphStats.getApproximateKeys(); + dataSize += graphStats.getApproximateSize(); + partition2DataSize.put(graphStats.getPartitionId(), graphStats.getApproximateSize()); + } + } + } + List resultPartitionList = new ArrayList<>(); + List tmpPartitions = pdRestService.getPartitions(graphName); + if ((tmpPartitions != null) && (!tmpPartitions.isEmpty())) { + // 需要返回的分区信息 + for (Metapb.Partition partition : tmpPartitions) { + Metapb.PartitionStats partitionStats = pdRestService.getPartitionStats(graphName, partition.getId()); + Partition pt = new Partition(partition, partitionStats, pdService); + pt.dataSize = partition2DataSize.getOrDefault(partition.getId(), 0L); + resultPartitionList.add(pt); + } + } + partitions = resultPartitionList; + // 隐去图名后面的 /g /m /s + final int postfixLength = 2; + graphName = graphName.substring(0, graphName.length() - postfixLength); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Partition.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Partition.java new file mode 100644 index 0000000000..0d758102fa --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Partition.java @@ -0,0 +1,75 @@ +package org.apache.hugegraph.pd.model; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDService; + +import lombok.AccessLevel; +import lombok.Data; +import lombok.Getter; +import lombok.Setter; +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2023/7/25 + **/ + +@Slf4j +@Data +class Partition { + + int partitionId; + String graphName; + String workState; + long startKey; + long endKey; + List shards; + long dataSize; + @Getter(AccessLevel.NONE) + @Setter(AccessLevel.NONE) + private transient PDService pdService; + + public 
Partition(Metapb.Partition pt, Metapb.PartitionStats stats, PDService service) { + this.pdService = service; + if (pt != null) { + partitionId = pt.getId(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + workState = String.valueOf(pt.getState()); + graphName = pt.getGraphName(); + final int postfixLength = 2; + graphName = graphName.substring(0, graphName.length() - postfixLength); + if (stats != null) { + List shardStatsList = stats.getShardStatsList(); + List shardsList = new ArrayList<>(); + for (Metapb.ShardStats shardStats : shardStatsList) { + Shard shard = new Shard(shardStats, partitionId); + shardsList.add(shard); + } + this.shards = shardsList; + } else { + List shardsList = new ArrayList<>(); + try { + + var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId()); + if (shardGroup != null) { + for (Metapb.Shard shard1 : shardGroup.getShardsList()) { + shardsList.add(new Shard(shard1, partitionId)); + } + } else { + log.error("GraphAPI.Partition(), get shard group: {} returns null", pt.getId()); + } + } catch (PDException e) { + log.error("Partition init failed, error: {}", e.getMessage()); + } + this.shards = shardsList; + } + + + } + } +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java new file mode 100644 index 0000000000..daa48dffad --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PeerRestRequest.java @@ -0,0 +1,8 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +@Data +public class PeerRestRequest { + private String peerList; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java new file mode 100644 index 0000000000..a001578736 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryQueryRestRequest.java @@ -0,0 +1,17 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +import java.util.HashMap; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@Data +public class RegistryQueryRestRequest { + + String appName; + String version; + HashMap labels; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java new file mode 100644 index 0000000000..9682de795a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestRequest.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +import java.util.HashMap; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@Data +public class RegistryRestRequest { + + String id; + String appName; + String version; + String address; + String interval; + HashMap labels; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java new file mode 100644 index 0000000000..261d750c9d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RegistryRestResponse.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.model; + +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import lombok.Data; + +import java.io.Serializable; + +/** + * @author zhangyingjie + * @date 2022/2/8 + **/ +@Data +public class RegistryRestResponse { + + ErrorType errorType; + String message; + Serializable data; + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java new file mode 100644 index 0000000000..5fc0ce3745 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/RestApiResponse.java @@ -0,0 +1,41 @@ +package org.apache.hugegraph.pd.model; + +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import lombok.Data; + +import java.io.Serializable; +import java.util.HashMap; + +/** + * @author tianxiaohui + * @date 2022-07-21 + */ +@Data +public class RestApiResponse { + String message; + Object data; + int status; + + public RestApiResponse(Object data, ErrorType status, String message) { + if (data == null){ + data = new HashMap(); + } + this.data = data; + this.status = status.getNumber(); + this.message = message; + } + + public RestApiResponse() { + + } + + public RestApiResponse(Object data, int status, String message){ + if (data == null){ + data = new HashMap(); + } + this.data = data; + this.status = status; + this.message = message; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java new file mode 100644 index 0000000000..8c8c4d9b96 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java @@ -0,0 +1,143 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java +package org.apache.hugegraph.pd.model; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +public class PromTargetsModel { +======== +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + */ +public class SDConfig { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java + + private static final String LABEL_METRICS_PATH = "__metrics_path__"; + private static final String LABEL_SCHEME = "__scheme__"; + private static final String LABEL_JOB_NAME = "job"; + private static final String LABEL_CLUSTER = "cluster"; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java + private final Map labels = new HashMap<>(); + private Set targets = new HashSet<>(); + + private PromTargetsModel() { + } + + public static PromTargetsModel of() { + return new PromTargetsModel(); +======== + + private Set targets = new HashSet<>(); + private Map labels = new HashMap<>(); + + private SDConfig() { + } + + public static SDConfig of() { + return new SDConfig(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java + } + + public Set getTargets() { + return targets; + } + +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java + public PromTargetsModel setTargets(Set targets) { +======== + public SDConfig setTargets(Set targets) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java + if (targets != null) { + this.targets = targets; + } + return this; + } + + public Map getLabels() { + return labels; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java + public PromTargetsModel addTarget(String target) { + if (target == null) { + return this; + } +======== + public SDConfig addTarget(String target) { + if (target == null) return this; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java + this.targets.add(target); + return this; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java + public PromTargetsModel setMetricsPath(String path) { + return this.addLabel(LABEL_METRICS_PATH, path); + } + + public PromTargetsModel setScheme(String scheme) { + return this.addLabel(LABEL_SCHEME, scheme); + } + + public PromTargetsModel setClusterId(String clusterId) { + return this.addLabel(LABEL_CLUSTER, clusterId); + } + + public PromTargetsModel addLabel(String label, String value) { + if (label == null || value == null) { + return this; + } +======== + public SDConfig setMetricsPath(String path) { + return this.addLabel(LABEL_METRICS_PATH, path); + } + + public SDConfig setScheme(String scheme) { + return this.addLabel(LABEL_SCHEME, scheme); + } + + public SDConfig setClusterId(String clusterId) { + return this.addLabel(LABEL_CLUSTER, clusterId); + } + + public SDConfig addLabel(String label, String value) { + if (label == null || value == null) return this; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java 
+ this.labels.put(label, value); + return this; + } + + @Override + public String toString() { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java + return "PromTargetModel{" + +======== + return "SDConfig{" + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java + "targets=" + targets + + ", labels=" + labels + + '}'; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Shard.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Shard.java new file mode 100644 index 0000000000..05ebc72cc9 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/Shard.java @@ -0,0 +1,34 @@ +package org.apache.hugegraph.pd.model; + +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.Data; + +/** + * @author zhangyingjie + * @date 2023/7/25 + **/ +@Data +class Shard { + long partitionId; + long storeId; + String state; + String role; + int progress; + + public Shard(Metapb.ShardStats shardStats, long partitionId) { + this.role = String.valueOf(shardStats.getRole()); + this.storeId = shardStats.getStoreId(); + this.state = String.valueOf(shardStats.getState()); + this.partitionId = partitionId; + this.progress = shardStats.getProgress(); + } + + public Shard(Metapb.Shard shard, long partitionId) { + this.role = String.valueOf(shard.getRole()); + this.storeId = shard.getStoreId(); + this.state = Metapb.ShardState.SState_Normal.name(); // gshard的状态默认为normal + this.progress = 0; + this.partitionId = partitionId; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java new file mode 100644 index 0000000000..30a02d49b7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/StoreRestRequest.java @@ -0,0 +1,8 @@ +package org.apache.hugegraph.pd.model; + +import 
lombok.Data; + +@Data +public class StoreRestRequest { + String storeState; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java new file mode 100644 index 0000000000..cf0f03ddef --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/TimeRangeRequest.java @@ -0,0 +1,13 @@ +package org.apache.hugegraph.pd.model; + +import lombok.Data; + +/** + * @author zhangyingjie + * @date 2022/3/23 + **/ +@Data +public class TimeRangeRequest { + String startTime; + String endTime; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java new file mode 100644 index 0000000000..3a4fc9254d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -0,0 +1,282 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.notice; + +import java.util.function.Function; +import java.util.function.Supplier; + +import org.apache.hugegraph.pd.common.HgAssert; + +import lombok.extern.slf4j.Slf4j; + +// TODO: merge/move to another package +@Slf4j +public class NoticeBroadcaster { + + private final Supplier noticeSupplier; + private long noticeId; + private String durableId; + private Supplier durableSupplier; + private Function removeFunction; + private int state; //0=ready; 1=notified; 2=done ack; -1=error + private int counter; + private long timestamp; + + private NoticeBroadcaster(Supplier noticeSupplier) { + this.noticeSupplier = noticeSupplier; + this.timestamp = System.currentTimeMillis(); + } + + public static NoticeBroadcaster of(Supplier noticeSupplier) { + HgAssert.isArgumentNotNull(noticeSupplier, "noticeSupplier"); + return new NoticeBroadcaster(noticeSupplier); + } + + public NoticeBroadcaster setDurableSupplier(Supplier durableSupplier) { + this.durableSupplier = durableSupplier; + return this; + } + + public NoticeBroadcaster setRemoveFunction(Function removeFunction) { + this.removeFunction = removeFunction; + return this; + } + + public NoticeBroadcaster notifying() { + + if (this.state >= 2) { + log.warn("Aborted notifying as ack has done. 
notice: {}", this); +======== +package org.apache.hugegraph.pd.notice; + +import org.apache.hugegraph.pd.common.HgAssert; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + * @version 2.0 added the NoticeDeliver on 2023/11/29 + */ +@Slf4j +public class NoticeBroadcaster { + private final NoticeDeliver noticeDeliver; + private long noticeId; + private String durableId; + private int state; // 0=ready; 1=notified; 2=done ack; 10=invalid, -1=error + private int counter; + private long timestamp; + + public static NoticeBroadcaster of(NoticeDeliver noticeDeliver) { + HgAssert.isArgumentNotNull(noticeDeliver, "noticeDeliver"); + return new NoticeBroadcaster(noticeDeliver); + } + + private NoticeBroadcaster(NoticeDeliver noticeDeliver) { + this.noticeDeliver = noticeDeliver; + this.timestamp = System.currentTimeMillis(); + } + + public NoticeBroadcaster notifying() { + try { + if (!this.noticeDeliver.isDuty()) { + this.state = 10; + log.warn("Notification aborted due to not in duty state. notice: {}", this.getNoticeString()); + return this; + } + } catch (Throwable t) { + log.error("Failed to invoke `NoticeDeliver::isDuty`, but continuing the the notification, caused by:", t); + } + + if (this.state >= 2) { + log.warn("Notification aborted as acknowledgment has been received. 
notice: {}", this); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + return this; + } + + this.counter++; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + + if (this.durableId == null && this.durableSupplier != null) { + try { + this.durableId = this.durableSupplier.get(); + } catch (Throwable t) { + log.error("Failed to invoke durableSupplier, cause by:", t); +======== + if (this.durableId == null) { + try { + this.durableId = this.noticeDeliver.save(); + } catch (Throwable t) { + log.error("Failed to invoke `NoticeDeliver::save`, caused by:", t); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + } + } + + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + this.noticeId = this.noticeSupplier.get(); + state = 1; + } catch (Throwable t) { + state = -1; + log.error("Failed to invoke noticeSupplier: {}; cause by: " + + this.noticeSupplier.toString(), t); +======== + this.noticeId = this.noticeDeliver.send(this.durableId); + state = 1; + } catch (Throwable t) { + state = -1; + log.error("Failed to invoke `NoticeDeliver::send`, notice: {}, caused by: " + this.noticeDeliver.toNoticeString(), t); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + } + + return this; + } + + public boolean checkAck(long ackNoticeId) { + boolean flag = false; + + if (this.noticeId == ackNoticeId) { + flag = true; + this.state = 2; + } + + if (flag) { + this.doRemoveDurable(); + } + + return flag; + } + + public boolean doRemoveDurable() { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + log.info("Removing NoticeBroadcaster is stating, noticeId:{}, durableId: {}" + , this.noticeId, 
this.durableId); + boolean flag = false; + + if (this.removeFunction == null) { + log.warn("The remove-function hasn't been set."); + return false; + } +======== + log.info("NoticeBroadcaster is being removed, noticeId:{}, durableId: {}" + , this.noticeId, this.durableId); + boolean flag = false; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + + if (this.durableId == null) { + log.warn("The durableId hasn't been set."); + return false; + } + + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + if (!(flag = this.removeFunction.apply(this.durableId))) { + log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}" + , this.noticeId, this.durableId); + } + } catch (Throwable t) { + log.error("Failed to remove NoticeBroadcaster, noticeId: " + + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t); +======== + if (!(flag = this.noticeDeliver.remove(this.durableId))) { + log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}", + this.noticeId, this.durableId); + } + } catch (Throwable t) { + log.error("Failed to remove NoticeBroadcaster, noticeId: {}, durableId: {}. 
Caused by:", + this.noticeId, this.durableId, t); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + } + + return flag; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +======== + public void setDurableId(String durableId) { + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable id to the NoticeBroadcaster."); + } + + this.durableId = durableId; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + public long getNoticeId() { + return noticeId; + } + + public int getState() { + return state; + } + + public int getCounter() { + return counter; + } + + public String getDurableId() { + return durableId; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + public void setDurableId(String durableId) { + + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable-id to NoticeBroadcaster."); + } + + this.durableId = durableId; + } + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + @Override + public String toString() { + return "NoticeBroadcaster{" + + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + ", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; +======== + public String getNoticeString() { + return this.noticeDeliver.toNoticeString(); + } + + @Override + public String toString() { + return "NoticeBroadcaster{" + + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + 
", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeDeliver.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeDeliver.java new file mode 100644 index 0000000000..9dcef6ff7d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeDeliver.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.notice; + +/** + * @author lynn.bond@hotmail.com on 2023/11/29 + */ +public interface NoticeDeliver { + boolean isDuty(); + + Long send(String durableId); + + String save(); + + boolean remove(String durableId); + + String toNoticeString(); +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java new file mode 100644 index 0000000000..bc2229ced4 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/AbstractObserverSubject.java @@ -0,0 +1,298 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import org.apache.hugegraph.pd.util.IdUtil; +import com.google.protobuf.GeneratedMessageV3; +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Function; + +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +/** + * @author 
lynn.bond@hotmail.com created on 2021/11/9 + */ +@ThreadSafe +@Slf4j +abstract class AbstractObserverSubject { + /* sending notices to the clients */ + private final ConcurrentHashMap> observerHolder = new ConcurrentHashMap<>(1024); + /* notices from the clients */ + private final ConcurrentHashMap listenerHolder = new ConcurrentHashMap<>(1024); + private BiFunction listenerErrInterceptor = defaultErrInterceptor(); + + private final byte[] lock = new byte[0]; + private final PulseType pulseType; + + private static BiFunction defaultErrInterceptor() { + return (req, err) -> { + log.error("Failed to handle client's notice[{}], error: {}", req, err); + return 0; + }; + } + + public PulseType getPulseType() { + return pulseType; + } + + public void setListenerErrInterceptor(BiFunction listenerErrInterceptor) { + isArgumentNotNull(listenerErrInterceptor, "listenerErrInterceptor"); + this.listenerErrInterceptor = listenerErrInterceptor; + } + + protected AbstractObserverSubject(PulseType pulseType) { + this.pulseType = pulseType; + } + + /** + * Adding an observer from the remote client. + * + * @param observerId + * @param responseObserver + */ + void addObserver(Long observerId, StreamObserver responseObserver) { + synchronized (this.observerHolder) { + + if (this.observerHolder.containsKey(observerId)) { + responseObserver.onError( + new Exception("The observer [" + observerId + "] of " + this.pulseType.name() + + " subject has been existing.")); + return; + } + + this.observerHolder.put(observerId, responseObserver); + log.info("Added an observer to subject [ {} ], observer-id: [ {} ], total observers: [ {} ]." 
+ , this.pulseType, observerId, this.observerHolder.size()); + } + + } + + /** + * Removing an observer by id + * + * @param observerId + * @param responseObserver + */ + void removeObserver(Long observerId, StreamObserver responseObserver) { + synchronized (this.observerHolder) { + log.info("Removing an observer of subject [ {} ], observer-id: [ {} ].", this.pulseType, observerId); + this.observerHolder.remove(observerId); + } + + responseObserver.onCompleted(); + } + + abstract String toNoticeString(PulseResponse res); + + /** + * @param c + * @return notice ID + */ + protected long send2Clients(Consumer c, String originId) { + synchronized (lock) { + if (c == null) { + log.error(this.pulseType.name() + "'s notice was abandoned, caused by: notifyObserver(null)"); + return -1; + } + + if (originId == null) { + originId = "nil"; + } + PulseResponse.Builder resBuilder = PulseResponse.newBuilder(); + + try { + c.accept(resBuilder); + resBuilder.setPulseType(this.pulseType); + resBuilder.setOriginId(originId); + } catch (Throwable t) { + log.error(this.pulseType.name() + "'s notice was abandoned, caused by:", t); + return -1; + } + + long noticeId = IdUtil.createMillisId(); + Iterator>> iter = observerHolder.entrySet().iterator(); + + log.info("Broadcasting a notice to clients, subject: [ {} ], notice id: [ {} ], origin id: [ {} ]" + + ", observer count: [ {} ]", this.pulseType.name(), noticeId, originId, + observerHolder.size()); + + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long observerId = entry.getKey(); + PulseResponse res = resBuilder.setObserverId(observerId).setNoticeId(noticeId).build(); + + try { + entry.getValue().onNext(res); + } catch (Throwable e) { + log.error("Failed to send a notice to observer [ {} ] of subject [ {} ], caused by:", + observerId, this.pulseType.name(), e); + + // TODO: ? try multi-times? 
+ // iter.remove(); + // log.error("Removed an observer [ {} ] of subject [ {} ]" + // + ", because of once failure of sending.", entry.getKey(), this.pulseType.name(),e); + } + + } + + return noticeId; + } + + } + + protected long send2Clients(Consumer c, long noticeId, Collection observerIds) { + if (observerIds == null || observerIds.isEmpty()) { + return noticeId; + } + + synchronized (lock) { + if (c == null) { + log.error(this.pulseType.name() + "'s notice was abandoned, caused by: notifyObserver(null)"); + return -1; + } + + PulseResponse.Builder resBuilder = PulseResponse.newBuilder(); + + try { + c.accept(resBuilder); + resBuilder.setPulseType(this.pulseType) + .setOriginId(String.valueOf(noticeId)) + .setNoticeId(noticeId); + } catch (Throwable t) { + log.error(this.pulseType.name() + "'s notice was abandoned, caused by:", t); + return -1; + } + + log.info("Dispatching a notice to clients, subject: [ {} ], notice id: [ {} ], observer ids: {} " + // , content:\n{}" + , this.pulseType.name(), noticeId, observerIds); + + PulseResponse resPrototype = resBuilder.build(); + + observerIds.parallelStream().forEach(observerId -> { + StreamObserver observer = this.observerHolder.get(observerId); + + if (observer == null) { + log.warn("Failed to send a notice, because observer [ {} ] does not exist.", + observerId); + return; + } + + try { + observer.onNext(PulseResponse.newBuilder(resPrototype).setObserverId(observerId).build()); + } catch (Throwable e) { + log.error("Failed to send a notice to observer [ {} ] of subject [ {} ], caused by:", + observerId, this.pulseType.name(), e); + } + +/* log.info("Sent a notice, subject: [ {} ], notice id: [ {} ], observer id: [ {} ] " + // , content:\n{}" + , this.pulseType.name(), noticeId, observerId);*/ + }); + + return noticeId; + } + + } + + public long notifyClient(GeneratedMessageV3 response) { + return this.notifyClient(response, null); + } + + abstract long notifyClient(GeneratedMessageV3 response, String originId); + 
+ abstract long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds); + + protected void notifyError(int code, String message) { + synchronized (lock) { + Iterator>> iter = observerHolder.entrySet().iterator(); + PulseResponse.Builder resBuilder = PulseResponse.newBuilder(); + + while (iter.hasNext()) { + Map.Entry> entry = iter.next(); + Long observerId = entry.getKey(); + PulseResponse res = resBuilder.setObserverId(observerId).build(); + try { + entry.getValue().onError(Status.fromCodeValue(code).withDescription(message).asRuntimeException()); + } catch (Throwable e) { + log.warn("Failed to send {} 's notice[{}] to observer[{}], error:{}", + this.pulseType.name(), toNoticeString(res), observerId, e.getMessage()); + } + } + + } + } + + /** + * Add a listener from local server + * + * @param listenerId + * @param listener + */ + void addListener(Long listenerId, PulseListener listener) { + synchronized (this.listenerHolder) { + if (this.listenerHolder.containsKey(listenerId)) { + listener.onError( + new Exception("The listener-id[" + listenerId + "] of " + this.pulseType.name() + + " subject has been existing.")); + return; + } + + log.info("Adding a " + this.pulseType + "'s listener, listener-id is [" + listenerId + "]."); + this.listenerHolder.put(listenerId, listener); + + } + } + + /** + * Remove a listener by id + * + * @param listenerId + * @param listener + */ + void removeListener(Long listenerId, PulseListener listener) { + synchronized (this.listenerHolder) { + log.info("Removing a " + this.pulseType + "'s listener, listener-id is [" + listenerId + "]."); + this.observerHolder.remove(listenerId); + } + + listener.onCompleted(); + } + + abstract Function getNoticeHandler(); + + void handleClientNotice(PulseNoticeRequest noticeRequest) { + Iterator> iter = listenerHolder.entrySet().iterator(); + + while (iter.hasNext()) { + Map.Entry entry = iter.next(); + Long listenerId = entry.getKey(); + try { + 
entry.getValue().onNext(getNoticeHandler().apply(noticeRequest)); + } catch (Exception e) { + int flag = 0; + try { + flag = this.listenerErrInterceptor.apply(noticeRequest, e); + } catch (Exception e1) { + log.error("Failed to invoke error interceptor with notice[{}], listenerId: {}, error: {}" + , noticeRequest, listenerId, e1); + } + if (flag != 0) { + break; + } + } + } + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/BroadcasterFactory.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/BroadcasterFactory.java new file mode 100644 index 0000000000..32a34918e1 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/BroadcasterFactory.java @@ -0,0 +1,113 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.notice.NoticeBroadcaster; +import org.apache.hugegraph.pd.notice.NoticeDeliver; +import org.apache.hugegraph.pd.util.IdUtil; +import com.google.protobuf.GeneratedMessageV3; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.function.Function; + +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +/** + * @author lynn.bond@hotmail.com on 2023/11/7 + */ +@Slf4j +@ThreadSafe +final class BroadcasterFactory { + static PulseDurableProvider durableQueueProvider = PulseDurableProvider.DEFAULT; + static Function, AbstractObserverSubject> subjectProvider = (e) -> null; + + private BroadcasterFactory() { + } + + public static NoticeBroadcaster create(GeneratedMessageV3 notice) { + isArgumentNotNull(notice, "notice"); + AbstractObserverSubject subject = getSubject(notice); + + return NoticeBroadcaster.of(new NoticeDeliverImpl(notice, subject)); + } + + private static AbstractObserverSubject getSubject(T notice) { + AbstractObserverSubject subject = subjectProvider.apply(notice.getClass()); + + if (subject == null) { + throw new IllegalStateException("Failed to 
retrieve the subject via notice class: [" + + notice.getClass().getTypeName() + "]"); + } + + return subject; + } + + private static Metapb.QueueItem toQueueItem(GeneratedMessageV3 notice) { + return Metapb.QueueItem.newBuilder() + .setItemId(IdUtil.createMillisStr()) + .setItemClass(notice.getClass().getTypeName()) + .setItemContent(notice.toByteString()) + .setTimestamp(System.currentTimeMillis()) + .build(); + } + + /** + * this inner class is used to deliver notice to client + */ + private static class NoticeDeliverImpl implements NoticeDeliver { + private GeneratedMessageV3 notice; + private AbstractObserverSubject subject; + + public NoticeDeliverImpl(GeneratedMessageV3 notice, AbstractObserverSubject subject) { + this.notice = notice; + this.subject = subject; + } + + @Override + public boolean isDuty() { + return durableQueueProvider.isLeader(); + } + + @Override + public Long send(String durableId) { + return subject.notifyClient(notice, durableId); + } + + @Override + public String save() { + Metapb.QueueItem queueItem = toQueueItem(notice); + String res = null; + + try { + if (durableQueueProvider.saveQueue(queueItem)) { + res = queueItem.getItemId(); + } else { + log.error("Failed to persist queue item: {}", notice); + } + } catch (Throwable t) { + log.error("Failed to invoke `DurableQueueProvider::saveQueue`, caused by:", t); + } + + return res; + } + + @Override + public boolean remove(String durableId) { + boolean flag = false; + + try { + flag = durableQueueProvider.removeQueue(durableId); + } catch (Throwable t) { + log.error("Failed to invoke `DurableQueueProvider::removeQueue`, cause by:", t); + } + + return flag; + } + + @Override + public String toNoticeString() { + return this.notice.toString(); + } + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ChangeType.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ChangeType.java new file mode 100644 index 0000000000..4d9613a480 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ChangeType.java @@ -0,0 +1,27 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.PulseChangeType; +import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; + +/** + * Copy from {@link WatchChangeType} + * + * @author original by zhangyingjie + * @date 2023/11/08 + **/ +public enum ChangeType { + ADD(PulseChangeType.PULSE_CHANGE_TYPE_ADD), + ALTER(PulseChangeType.PULSE_CHANGE_TYPE_ALTER), + DEL(PulseChangeType.PULSE_CHANGE_TYPE_DEL), + USER_DEFINED(PulseChangeType.PULSE_CHANGE_TYPE_SPECIAL1); + + private final PulseChangeType grpcType; + + ChangeType(PulseChangeType grpcType) { + this.grpcType = grpcType; + } + + public PulseChangeType getGrpcType() { + return this.grpcType; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/GraphChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/GraphChangeSubject.java new file mode 100644 index 0000000000..c3d8bb715a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/GraphChangeSubject.java @@ -0,0 +1,43 @@ +package org.apache.hugegraph.pd.pulse; + + +import org.apache.hugegraph.pd.grpc.pulse.PulseGraphRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseGraphResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import com.google.protobuf.GeneratedMessageV3; + +import java.util.Collection; +import java.util.function.Function; + +/** + * @author lynn.bond@hotmail.com created on 2023/11/08 + */ +class GraphChangeSubject extends AbstractObserverSubject { + + GraphChangeSubject() { + super(PulseType.PULSE_TYPE_GRAPH_CHANGE); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getGraphResponse().toString(); + } + + @Override + Function getNoticeHandler() { + return r -> r.getGraphRequest(); + } + + + 
@Override + long notifyClient(GeneratedMessageV3 response, String originId) { + return super.send2Clients(b -> b.setGraphResponse((PulseGraphResponse) response), originId); + } + + @Override + long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds) { + return super.send2Clients(b -> b.setGraphResponse((PulseGraphResponse) response), noticeId, observerIds); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/NoticeParseUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/NoticeParseUtil.java new file mode 100644 index 0000000000..592ff57c5d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/NoticeParseUtil.java @@ -0,0 +1,75 @@ +package org.apache.hugegraph.pd.pulse; + +import com.google.protobuf.ByteString; +import com.google.protobuf.GeneratedMessageV3; +import com.google.protobuf.Parser; +import lombok.extern.slf4j.Slf4j; + +import java.lang.reflect.Method; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * @author lynn.bond@hotmail.com on 2024/2/26 + */ +@Slf4j +class NoticeParseUtil { + private static final Map PARSER_HOLDER = new ConcurrentHashMap<>(); + + public static GeneratedMessageV3 parseNotice(ByteString instanceData, String className) { + Class clazz = null; + + try { + clazz = Class.forName(className); + } catch (ClassNotFoundException e) { + log.error("Failed to retrieve the Class of notice with class-name: " + + className + ", caused by error: ", e); + return null; + } + + return deserializeNotice(instanceData, clazz); + } + + public static T deserializeNotice(ByteString data, Class noticeClass) { + Parser parser = getNoticeParser(noticeClass); + if (parser == null) { + return null; + } + + T message = null; + + try { + message = (T) parser.parseFrom(data); + } catch (Exception e) { + log.error("Failed to deserialize notice with class-name: " + noticeClass.getTypeName(), e); + return null; + } + + return message; + } + + 
public static Parser getNoticeParser(Class noticeClass) { + Parser parser = (Parser) PARSER_HOLDER.get(noticeClass.getTypeName()); + + if (parser != null) { + return parser; + } + + try { + Method parseFromMethod = noticeClass.getMethod("parser"); + parser = (Parser) parseFromMethod.invoke(null); + } catch (Exception e) { + log.error("Failed to fetch the Parser of notice, class name: " + noticeClass.getTypeName(), e); + return null; + } + + if (parser == null) { + log.error("There is no Parser for the notice with the class name: " + noticeClass.getTypeName()); + return null; + } + + PARSER_HOLDER.put(noticeClass.getTypeName(), parser); + + return parser; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubjects.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubjects.java new file mode 100644 index 0000000000..0dc068599e --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PDPulseSubjects.java @@ -0,0 +1,236 @@ +package org.apache.hugegraph.pd.pulse; + +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiConsumer; +import java.util.function.BiFunction; +import java.util.function.Function; + +import javax.annotation.concurrent.ThreadSafe; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType; +import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseGraphResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNodeResponse; +import 
org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulsePartitionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseShardGroupResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; +import org.apache.hugegraph.pd.notice.NoticeBroadcaster; +import org.apache.hugegraph.pd.util.IdUtil; +import com.google.protobuf.GeneratedMessageV3; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import java.util.Collection; +import java.util.Collections; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + * @version 2.0.0 added the watch suite on 2023/11/06 + */ +@Slf4j +@ThreadSafe +public class PDPulseSubjects { + private static final Map PULSE_SUBJECT_HOLDER = new ConcurrentHashMap<>(); + private static final Map NOTICE_SUBJECT_HOLDER = new ConcurrentHashMap<>(); + private static final Map PULSE_TYPE_HOLDER = new ConcurrentHashMap<>(); + + static { + RetryingHub.broadcasterProvider = PDPulseSubjects::createBroadcaster; + BroadcasterFactory.subjectProvider = PDPulseSubjects::getSubjectViaNotice; + RetryingSwitch.subjectProvider = PDPulseSubjects::getSubjectViaNotice; + + putPulseSubject(new PartitionHeartbeatSubject(), PartitionHeartbeatResponse.class); + putPulseSubject(new PdInstructionSubject(), PdInstructionResponse.class); + + putPulseSubject(new PartitionChangeSubject(), PulsePartitionResponse.class); + putPulseSubject(new StoreNodeChangeSubject(), PulseNodeResponse.class); + putPulseSubject(new GraphChangeSubject(), PulseGraphResponse.class); + putPulseSubject(new ShardGroupChangeSubject(), PulseShardGroupResponse.class); + } + + static synchronized void putPulseSubject + (AbstractObserverSubject subject, Class noticeClass) { + + HgAssert.isArgumentNotNull(subject, "subject"); + 
HgAssert.isArgumentNotNull(noticeClass, "noticeClass"); + + PulseType pulseType = subject.getPulseType(); + if (PULSE_SUBJECT_HOLDER.containsKey(pulseType.name())) { + log.warn("Pulse type [ {} ] has been registered, will be replaced with subject: [ {} ]" + , pulseType.name(), subject.getClass().getTypeName()); + } + + log.info("Registering pulse type [ {} ] with subject: [ {} ]", pulseType, subject.getClass().getSimpleName()); + PULSE_SUBJECT_HOLDER.put(pulseType.name(), subject); + NOTICE_SUBJECT_HOLDER.put(noticeClass.getTypeName(), subject); + PULSE_TYPE_HOLDER.put(noticeClass.getTypeName(), pulseType); + } + + public static void setDurableQueueProvider(PulseDurableProvider durableQueueProvider) { + HgAssert.isArgumentNotNull(durableQueueProvider, "durableQueueProvider"); + RetryingHub.durableQueueProvider = durableQueueProvider; + BroadcasterFactory.durableQueueProvider = durableQueueProvider; + RetryingSwitch.pulseDurableProvider = durableQueueProvider; + } + + /** + * Add a responseObserver of client + */ + public static StreamObserver addObserver(StreamObserver responseObserver) { + isArgumentNotNull(responseObserver, "responseObserver"); + return SubjectIndividualObserver.of(responseObserver, getSubjectProvider()) + .setAckConsumer(getAckConsumer()); + } + + /** + * Broadcasting notice to all PD clients. 
+ */ + public static void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) { + HgAssert.isArgumentNotNull(responseBuilder, "responseBuilder"); + RetryingHub.addQueue(createBroadcaster(responseBuilder.build()).notifying()); + } + + /** + * Dispatching notice to the specific PD client + * + * @return false if failed to dispatch notice + */ + public static boolean notifyClient(PartitionHeartbeatResponse.Builder responseBuilder, long storeId) { + return notifyClient(responseBuilder, Collections.singleton(storeId)); + } + + /** + * Dispatching notice to the specific PD client + * + * @return false if failed to dispatch notice + */ + public static boolean notifyClient(PartitionHeartbeatResponse.Builder responseBuilder, Collection storeIds) { + HgAssert.isArgumentNotNull(responseBuilder, "responseBuilder"); + return RetryingSwitch.addNotice(responseBuilder.build(), storeIds); + } + + /** + * @param response + * @see PDPulseSubjects::notifyPeerChange + */ + @Deprecated + public static void notifyClient(PdInstructionResponse response) { + RetryingHub.addQueue(createBroadcaster(response).notifying()); + } + + public static void notifyError(int code, String message) { + PULSE_SUBJECT_HOLDER.forEach((k, v) -> { + v.notifyError(code, message); + }); + } + + /** + * Adding a notice listener for notices from the pd-client. + * + * @param listener + */ + public static void listenPartitionHeartbeat(PulseListener listener) { + PULSE_SUBJECT_HOLDER.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name()).addListener(createListenerId(), listener); + } + + /** + * Setting an interceptor for partition-heartbeat listener. + * The interceptor will be invoked when an exception raised by the listener. + * Continuing to invoke the next listener if interceptor returns 0. 
+ * + * @param interceptor + */ + public static void setPartitionErrInterceptor(BiFunction interceptor) { + PULSE_SUBJECT_HOLDER.get(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT.name()).setListenerErrInterceptor(interceptor); + } + + /************************************************************************** + * The following methods are for internal use only. * + * ***********************************************************************/ + private static BiConsumer getAckConsumer() { + return (subject, ack) -> { + log.info("Receiving an ack of subject[{}], ack: {noticeId={},observerId={}}" + , subject.getPulseType(), ack.getNoticeId(), ack.getObserverId()); + if (!RetryingHub.removeNotice(ack.getNoticeId())) { + RetryingSwitch.ackNotice(ack.getNoticeId(), ack.getObserverId()); + } + }; + } + + private static AbstractObserverSubject getSubjectViaNotice(Class noticeClass) { + return PDPulseSubjects.NOTICE_SUBJECT_HOLDER.get(noticeClass.getTypeName()); + } + + private static NoticeBroadcaster createBroadcaster(GeneratedMessageV3 notice) { + return BroadcasterFactory.create(notice); + } + + private static Function getSubjectProvider() { + return (pulseType) -> PULSE_SUBJECT_HOLDER.get(pulseType.name()); + } + + private static AbstractObserverSubject getSubject(PulseType pulseType) { + AbstractObserverSubject subject = PULSE_SUBJECT_HOLDER.get(pulseType.name()); + if (subject == null) { + throw new IllegalArgumentException("Can't find the subject of pulseType: " + pulseType.name()); + } + return subject; + } + + private static Long createListenerId() { + return IdUtil.createMillisId(); + } + + /************************************************************************** + * The following methods are for the implementation of watch * + *************************************************************************/ + + public static void notifyPartitionChange(ChangeType changeType, String graph, int partitionId) { + getSubject(PulseType.PULSE_TYPE_PARTITION_CHANGE).notifyClient( + 
PulseNotices.createPartitionChange(changeType, graph, partitionId) + ); + } + + public static void notifyShardGroupChange(ChangeType changeType, int groupId, Metapb.ShardGroup group) { + getSubject(PulseType.PULSE_TYPE_SHARD_GROUP_CHANGE).notifyClient( + PulseNotices.createShardGroupChange(changeType, groupId, group) + ); + } + + public static void notifyNodeChange(StoreNodeEventType changeType, String graph, long nodeId) { + getSubject(PulseType.PULSE_TYPE_STORE_NODE_CHANGE).notifyClient( + PulseNotices.createNodeChange(changeType, graph, nodeId) + ); + } + + public static void notifyGraphChange(Metapb.Graph graph) { + getSubject(PulseType.PULSE_TYPE_GRAPH_CHANGE).notifyClient( + PulseNotices.createGraphChange(graph) + ); + } + + public static void notifyPeerChange(List peers) { + PdInstructionResponse.Builder builder = PdInstructionResponse.newBuilder(); + builder.setInstructionType(PdInstructionType.CHANGE_PEERS).addAllPeers(peers); + notify(PulseType.PULSE_TYPE_PD_INSTRUCTION, builder.build()); + } + + public static void notify(PulseType type, GeneratedMessageV3 message){ + getSubject(type).notifyClient(message); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionChangeSubject.java new file mode 100644 index 0000000000..00daaa6dfc --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionChangeSubject.java @@ -0,0 +1,42 @@ +package org.apache.hugegraph.pd.pulse; + + +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulsePartitionRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulsePartitionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import com.google.protobuf.GeneratedMessageV3; + +import java.util.Collection; +import java.util.function.Function; + +/** + * @author 
lynn.bond@hotmail.com created on 2023/11/08 + */ +class PartitionChangeSubject extends AbstractObserverSubject { + + PartitionChangeSubject() { + super(PulseType.PULSE_TYPE_PARTITION_CHANGE); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getPartitionResponse().toString(); + } + + @Override + Function getNoticeHandler() { + return r -> r.getPartitionRequest(); + } + + @Override + long notifyClient(GeneratedMessageV3 response, String originId) { + return super.send2Clients(b -> b.setPartitionResponse((PulsePartitionResponse) response),originId); + } + + @Override + long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds) { + return super.send2Clients(b -> b.setPartitionResponse((PulsePartitionResponse) response),noticeId,observerIds); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java new file mode 100644 index 0000000000..70bb2fffa1 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PartitionHeartbeatSubject.java @@ -0,0 +1,49 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.*; +import com.google.protobuf.GeneratedMessageV3; + +import java.util.Collection; +import java.util.function.Function; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +public class PartitionHeartbeatSubject extends AbstractObserverSubject { + + PartitionHeartbeatSubject() { + super(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getPartitionHeartbeatResponse().toString(); + } + + @Override + Function getNoticeHandler() { + return r -> r.getPartitionHeartbeatRequest(); + } +// +// void notifyClient(PartitionHeartbeatResponse.Builder responseBuilder) { +// +// super.send2Clients(b -> { +// b.setPartitionHeartbeatResponse(responseBuilder);; 
+// }); +// +// } + + @Override + long notifyClient(GeneratedMessageV3 response, String originId) { + return super.send2Clients(b -> { + b.setPartitionHeartbeatResponse((PartitionHeartbeatResponse) response); + }, originId); + } + + @Override + long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds) { + return super.send2Clients(b -> { + b.setPartitionHeartbeatResponse((PartitionHeartbeatResponse) response); + }, noticeId, observerIds); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java new file mode 100644 index 0000000000..376a2b7ce7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PdInstructionSubject.java @@ -0,0 +1,45 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import com.google.protobuf.GeneratedMessageV3; + +import java.util.Collection; +import java.util.function.Function; + +class PdInstructionSubject extends AbstractObserverSubject{ + + public PdInstructionSubject() { + super(PulseType.PULSE_TYPE_PD_INSTRUCTION); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getInstructionResponse().toString(); + } + + /** + * pd单纯的向pulse发送的指令,不接收对应的notice + * @return null + */ + @Override + Function getNoticeHandler() { + return pulseNoticeRequest -> null; + } + + @Override + long notifyClient(GeneratedMessageV3 response, String originId) { + return super.send2Clients(b -> { + b.setInstructionResponse((PdInstructionResponse) response); + },originId); + } + + @Override + long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds) { + return super.send2Clients(b -> { + 
b.setInstructionResponse((PdInstructionResponse) response); + },noticeId,observerIds); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProvider.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProvider.java new file mode 100644 index 0000000000..4c0041863a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProvider.java @@ -0,0 +1,86 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.Metapb; + +import javax.annotation.concurrent.ThreadSafe; +import java.util.Collection; +import java.util.List; + +/** + * @author lynn.bond@hotmail.com on 2023/11/28 + */ +@ThreadSafe +public interface PulseDurableProvider { + boolean isLeader(); + + boolean removeQueue(String queueId); + + boolean saveQueue(Metapb.QueueItem queue); + + List queryQueue(); + + boolean addNotice(Metapb.NoticeContent notice); + + Metapb.NoticeContent getNotice(long noticeId); + + boolean addObserverNotice(Collection observerNotices) ; + + List getObserverNotices() ; + + boolean removeObserverNotice(long observerId, long noticeId) ; + + boolean removeNoticeContent(long noticeId) ; + + PulseDurableProvider DEFAULT = new PulseDurableProvider() { + @Override + public boolean addNotice(Metapb.NoticeContent notice) { + return false; + } + + @Override + public Metapb.NoticeContent getNotice(long noticeId) { + return null; + } + + @Override + public boolean addObserverNotice(Collection observerNotices) { + return false; + } + + @Override + public List getObserverNotices() { + return null; + } + + @Override + public boolean removeObserverNotice(long observerId, long noticeId) { + return false; + } + + @Override + public boolean removeNoticeContent(long noticeId) { + return false; + } + + @Override + public boolean isLeader() { + return false; + } + + @Override + public boolean removeQueue(String queueId) { + return false; + } + + @Override + public boolean 
saveQueue(Metapb.QueueItem queue) { + return false; + } + + @Override + public List queryQueue() { + return null; + } + + }; +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProviderImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProviderImpl.java new file mode 100644 index 0000000000..fa1a120de7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseDurableProviderImpl.java @@ -0,0 +1,168 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataFactory; + +import org.apache.hugegraph.pd.meta.PulseStore; +import org.apache.hugegraph.pd.raft.RaftEngine; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +/** + * @author lynn.bond@hotmail.com on 2023/11/28 + */ +@Slf4j +@Component("durableQueueProvider") +public class PulseDurableProviderImpl implements PulseDurableProvider { + @Autowired + private PDConfig pdConfig; + private PulseStore pulseStore = null; + + @Override + public boolean removeQueue(String queueId) { + if (queueId == null) { + return false; + } + + try { + this.getPulseStore().removeItem(queueId); + return true; + } catch (Throwable t) { + log.error("Failed to remove item from store, item-id: " + queueId + ", caused by:", t); + } + + return false; + } + + @Override + public boolean saveQueue(Metapb.QueueItem queue) { + if (queue == null) { + return false; + } + + try { + this.getPulseStore().addItem(queue); + return true; + } catch (Throwable t) { + log.error("Failed to add item to store, item: " + queue.toString() + ", caused by:", t); + } + + return false; + } + + @Override + public List queryQueue() { + if (!isLeader()) { + return 
Collections.emptyList(); + } + + try { + return this.getPulseStore().getQueue(); + } catch (Throwable t) { + log.error("Failed to retrieve queue from PulseStore, caused by:", t); + } + + log.warn("Returned empty queue list."); + return Collections.emptyList(); + } + + @Override + public boolean addNotice(Metapb.NoticeContent notice) { + try { + this.getPulseStore().addNotice(notice); + return true; + } catch (Throwable t) { + log.error("Failed to add notice to store, notice: " + notice.toString() + ", caused by:", t); + } + + return false; + } + + @Override + public Metapb.NoticeContent getNotice(long noticeId) { + try { + return this.getPulseStore().getNotice(noticeId); + } catch (Throwable t) { + log.error("Failed to retrieve notice from PulseStore, caused by:", t); + } + + return null; + } + + @Override + public boolean addObserverNotice(Collection observerNotices) { + // TODO: implement a batch add method + try { + for (Metapb.ObserverNotice observerNotice : observerNotices) { + this.getPulseStore().addObserverNotice(observerNotice); + } + return true; + } catch (Throwable t) { + log.error("Failed to add a collection of observer notices to the store, observerNotices: " + + observerNotices.toString() + ", caused by:", t); + } + + return false; + } + + @Override + public List getObserverNotices() { + try { + return this.getPulseStore().getObserverNotices(); + } catch (Throwable t) { + log.error("Failed to retrieve observer notices from PulseStore, caused by:", t); + } + + return Collections.emptyList(); + } + + @Override + public boolean removeObserverNotice(long observerId, long noticeId) { + try { + this.getPulseStore().removeObserverNotice(observerId, noticeId); + return true; + } catch (Throwable t) { + log.error("Failed to remove observer notice from store, " + + "observerId: " + observerId + ", noticeId: " + noticeId + + ", caused by:", t); + } + + return false; + } + + @Override + public boolean removeNoticeContent(long noticeId) { + try { + 
this.getPulseStore().removeNoticeContent(noticeId); + return true; + }catch (Throwable t) { + log.error("Failed to remove notice content from store, " + + "noticeId: " + noticeId + + ", caused by:", t); + } + return false; + } + + @Override + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + private PulseStore getPulseStore() { + if (this.pulseStore == null) { + synchronized (this) { + if (this.pulseStore == null) { + this.pulseStore = MetadataFactory.newPulseStore(this.pdConfig); + } + } + } + + return this.pulseStore; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java new file mode 100644 index 0000000000..d282d95c3d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseListener.java @@ -0,0 +1,25 @@ +package org.apache.hugegraph.pd.pulse; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/9 + */ +public interface PulseListener { + /** + * Invoked on new notice. + * + * @param notice the notice. + */ + void onNext(T notice) throws Exception; + + /** + * Invoked on errors. + * + * @param throwable the error. + */ + void onError(Throwable throwable); + + /** + * Invoked on completion. 
+ */ + void onCompleted(); +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotices.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotices.java new file mode 100644 index 0000000000..a73c6d7132 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/PulseNotices.java @@ -0,0 +1,65 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PulseGraphResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNodeResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulsePartitionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseShardGroupResponse; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; + + +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; +import static org.apache.hugegraph.pd.common.HgAssert.isArgumentValid; + +/** + * @author lynn.bond@hotmail.com on 2023/11/8 + */ +public class PulseNotices { + private PulseNotices() { + } + + public static PulsePartitionResponse createPartitionChange(ChangeType changeType, + String graph, int partitionId) { + isArgumentNotNull(changeType, "changeType"); + isArgumentValid(graph, "graph"); + + return PulsePartitionResponse.newBuilder() + .setChangeType(changeType.getGrpcType()) + .setGraph(graph) + .setPartitionId(partitionId) + .build(); + } + + public static PulseShardGroupResponse createShardGroupChange(ChangeType changeType, + int groupId, Metapb.ShardGroup shardGroup) { + isArgumentNotNull(changeType, "changeType"); + isArgumentNotNull(shardGroup, "shardGroup"); + + return PulseShardGroupResponse.newBuilder() + .setShardGroupId(groupId) + .setType(changeType.getGrpcType()) + .setShardGroup(shardGroup) + .build(); + + } + + public static PulseNodeResponse createNodeChange(StoreNodeEventType nodeEventType, String graph, long nodeId){ + isArgumentNotNull(nodeEventType, "nodeEventType"); + 
isArgumentNotNull(graph, "graph"); + + return PulseNodeResponse.newBuilder() + .setGraph(graph) + .setNodeId(nodeId) + .setNodeEventType(nodeEventType) + .build(); + } + + public static PulseGraphResponse createGraphChange(Metapb.Graph graph){ + isArgumentNotNull(graph, "graph"); + + return PulseGraphResponse.newBuilder() + .setGraph(graph) + .build(); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingHub.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingHub.java new file mode 100644 index 0000000000..32b6cea7b8 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingHub.java @@ -0,0 +1,184 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.notice.NoticeBroadcaster; +import com.google.protobuf.ByteString; +import com.google.protobuf.GeneratedMessageV3; +import com.google.protobuf.Parser; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.*; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * @author lynn.bond@hotmail.com on 2023/11/3 + */ +@Slf4j +@ThreadSafe +abstract class RetryingHub { + private static final int RETRYING_PERIOD_SECONDS = 60; + private static final int RETRYING_PERIOD_MILLISECONDS = RETRYING_PERIOD_SECONDS * 1000; + private static final long NOTICE_EXPIRATION_TIME = 30 * 60 * 1000; + + private static final ConcurrentLinkedQueue BROADCASTER_QUEUE = new ConcurrentLinkedQueue<>(); + private static final ScheduledExecutorService SCHEDULED_EXECUTOR = new ScheduledThreadPoolExecutor(1); + private static final Map PARSER_HOLDER = new ConcurrentHashMap<>(); + + static Function broadcasterProvider = (e) -> null; + static PulseDurableProvider 
durableQueueProvider = PulseDurableProvider.DEFAULT; + + static { + /* Initiate retries at the beginning of each minute. e.g. 14:55:00,14:56:00 */ + long currentSeconds = Instant.now().getEpochSecond(); + long nextMinute = (currentSeconds / 60 + 1) * 60; + long initialDelay = nextMinute - currentSeconds; + + initialDelay += 10; // 10 seconds after the switch starts. + + log.info("Initiate the retries of hub for the pulse notice after [ {} ] seconds.", initialDelay); + + SCHEDULED_EXECUTOR.scheduleAtFixedRate( + () -> doSchedule(), initialDelay, RETRYING_PERIOD_SECONDS, TimeUnit.SECONDS); + } + + public static void addQueue(NoticeBroadcaster broadcaster) { + HgAssert.isArgumentNotNull(broadcaster, "broadcaster"); + + log.info("Adding a notice to retrying queue, notice: {}", broadcaster); + BROADCASTER_QUEUE.add(broadcaster); + } + + /** + * Removing a notice from retrying queue via the notice id. + * + * @param noticeId + * @return true if the notice was removed, otherwise false. + */ + public static boolean removeNotice(long noticeId) { + return BROADCASTER_QUEUE.removeIf(e -> e.checkAck(noticeId)); + } + + private static void doSchedule() { + try { + appendQueue(); + expireQueue(); + BROADCASTER_QUEUE.forEach(e -> { + retrying(e); + }); + wipeQueue(); + } catch (Throwable t) { + log.error("Failed to schedule a notice broadcasting retry, caused by: ", t); + } + } + + private static void appendQueue() { + BROADCASTER_QUEUE.addAll( + getQueueItems() + .parallelStream() + .filter(e -> !BROADCASTER_QUEUE + .stream() + .anyMatch(b -> e.getItemId().equals(b.getDurableId())) + ).map(e -> retrieveBroadcaster(e)) + .peek(e -> log.info("Appending a notice: {}", e)) + .filter(e -> e != null) + .collect(Collectors.toList()) + ); + } + + private static void expireQueue() { + BROADCASTER_QUEUE.removeIf(e -> { + if (System.currentTimeMillis() - e.getTimestamp() >= NOTICE_EXPIRATION_TIME) { + log.info("Notice was expired, trying to remove, notice: {}", e); + return 
e.doRemoveDurable(); + } else { + return false; + } + }); + } + + private static void retrying(NoticeBroadcaster broadcaster) { + if (System.currentTimeMillis() - broadcaster.getTimestamp() < RETRYING_PERIOD_MILLISECONDS) { + log.info("Skipped the retrying of notice due to 'current-time - creation-time < {} sec'" + + ", notice: {}", RETRYING_PERIOD_SECONDS, broadcaster); + return; + } + + log.info("Retrying... notice: {}", broadcaster); + broadcaster.notifying(); + } + + private static void wipeQueue() { + BROADCASTER_QUEUE.removeIf(e -> { + if (e.getState() == 10) { + log.info("Starting to remove an invalid notice from the queue, notice: {}", e); + return true; + } else { + return false; + } + }); + } + + private static List getQueueItems() { + try { + List items = durableQueueProvider.queryQueue(); + if (items != null) { + return items; + } + } catch (Throwable t) { + log.error("Failed to retrieve a queue from the DurableQueueProvider, caused by: ", t); + } + + return Collections.emptyList(); + } + + private static NoticeBroadcaster retrieveBroadcaster(Metapb.QueueItem item) { + if (item == null) { + log.error("Failed to create a NoticeBroadcaster, caused by: queue-item is null"); + return null; + } + + String className = item.getItemClass(); + if (className == null || className.isEmpty()) { + log.error("Failed to create a NoticeBroadcaster, caused by: class-name is null or empty"); + } + + ByteString instanceData = item.getItemContent(); + if (instanceData == null || instanceData.isEmpty()) { + log.error("Failed to create a NoticeBroadcaster, caused by: item-content is null or empty"); + } + + GeneratedMessageV3 notice = NoticeParseUtil.parseNotice(instanceData, className); + if (notice == null) { + return null; + } + + NoticeBroadcaster res = toBroadcaster(notice, className); + if (res == null) { + return null; + } + + res.setDurableId(item.getItemId()); + res.setTimestamp(item.getTimestamp()); + + return res; + } + + private static NoticeBroadcaster 
toBroadcaster(GeneratedMessageV3 notice, String className) { + NoticeBroadcaster res = null; + + try { + res = broadcasterProvider.apply(notice); + } catch (Throwable t) { + log.error("Failed to fetch a NoticeBroadcaster via Notice instance and Notice class: " + className, t); + } + + return res; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingSwitch.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingSwitch.java new file mode 100644 index 0000000000..b0fb9b1c16 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/RetryingSwitch.java @@ -0,0 +1,268 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.util.IdUtil; +import com.google.protobuf.ByteString; +import com.google.protobuf.GeneratedMessageV3; +import lombok.extern.slf4j.Slf4j; + +import javax.annotation.concurrent.ThreadSafe; +import java.text.SimpleDateFormat; +import java.time.Instant; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * @author lynn.bond@hotmail.com on 2024/2/21 + */ +@Slf4j +@ThreadSafe +public class RetryingSwitch { + private static final int RETRYING_PERIOD_SECONDS = 60; + private static final int RETRYING_PERIOD_MILLISECONDS = RETRYING_PERIOD_SECONDS * 1000; + private static final long NOTICE_EXPIRATION_TIME = 1000 * 60 * 30; /** 30 minutes */ + + private static final ScheduledExecutorService SCHEDULED_EXECUTOR 
= new ScheduledThreadPoolExecutor(1); + private static final ConcurrentHashMap> NOTICE_CACHE = new ConcurrentHashMap<>(); + private static final Queue EXPIRED_BUFFER = new ConcurrentLinkedQueue<>(); + + static PulseDurableProvider pulseDurableProvider = PulseDurableProvider.DEFAULT; + static Function, AbstractObserverSubject> subjectProvider = (e) -> null; + + static { + /* Initiate retries at the beginning of each minute. e.g. 14:55:00,14:56:00 */ + long currentSeconds = Instant.now().getEpochSecond(); + long nextMinute = (currentSeconds / 60 + 1) * 60; + long initialDelay = nextMinute - currentSeconds; + + log.info("Initiate the retries of switch for the pulse notice after [ {} ] seconds.", initialDelay); + SCHEDULED_EXECUTOR.scheduleAtFixedRate( + () -> doSchedule(), initialDelay, RETRYING_PERIOD_SECONDS, TimeUnit.SECONDS); + + } + + /** + * @param notice + * @param observerIds + * @throws IllegalArgumentException if notice or observerIds is null + */ + public static boolean addNotice(GeneratedMessageV3 notice, Collection observerIds) { + HgAssert.isArgumentNotNull(notice, "notice"); + HgAssert.isFalse(HgAssert.isInvalid(observerIds), "The argument is invalid: observerIds"); + + Metapb.NoticeContent noticeContent = toNoticeContent(notice); + + if (!saveNotice(noticeContent, observerIds)) { + return false; + } + + return sending(notice, noticeContent.getNoticeId(), observerIds); + } + + public static boolean ackNotice(long noticeId, long observerId) { + log.info("Ack remove notice: [ {} ], observer: [ {} ] ", noticeId, observerId); + return pulseDurableProvider.removeObserverNotice(observerId, noticeId); + } + + private static boolean saveNotice(Metapb.NoticeContent noticeContent, Collection observerIds) { + + if (!pulseDurableProvider.addNotice(noticeContent)) { + log.error("Failed to add notice: {}", noticeContent); + return false; + } + + if (!pulseDurableProvider.addObserverNotice(toObserverNotices(noticeContent.getNoticeId(), observerIds))) { + 
log.error("Failed to add observer notice: {}", noticeContent); + return false; + } + + return true; + } + + private static void doSchedule() { + try { + if (isOnDuty()) { + retrying(); + wipeExpired(); + cleanCache(); + } else { + log.debug("Not on duty, skip retrying."); + } + } catch (Throwable t) { + log.error("Failed to schedule a notice broadcasting retry, caused by: ", t); + } + } + + private static boolean isOnDuty() { + return pulseDurableProvider.isLeader(); + } + + private static void retrying() { + List observerNotices = pulseDurableProvider.getObserverNotices(); + Map> noticeObservers = toNoticeObservers(observerNotices); + + log.info("Retrying notices of switch, amount: {}", noticeObservers.size()); + + noticeObservers.forEach( + (noticeId, observerIds) -> { + sending(noticeId, observerIds); + }); + } + + private static boolean sending(long noticeId, Collection observerIds) { + GeneratedMessageV3 notice = getNotice(noticeId); + + if (notice == null) { + log.error("Failed to get notice for id: {}", noticeId); + return false; + } + + return sending(notice, noticeId, observerIds); + } + + private static boolean sending(GeneratedMessageV3 notice, long noticeId, Collection observerIds) { + AbstractObserverSubject subject = subjectProvider.apply(notice.getClass()); + + if (subject == null) { + log.error("Failed to get an observer subject for notice: {}", notice); + return false; + } + + subject.notifyClient(notice, noticeId, observerIds); + + return true; + } + + private static void wipeExpired() { + SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + + while (!EXPIRED_BUFFER.isEmpty()) { + Metapb.ObserverNotice notice = EXPIRED_BUFFER.poll(); + Date createdTime = new Date(notice.getTimestamp()); + log.info("Removing expired notice: [ {} ], observer: [ {} ], created: [ {} ]", + notice.getNoticeId(), notice.getObserverId(), formatter.format(createdTime)); + NOTICE_CACHE.remove(notice.getNoticeId()); + 
pulseDurableProvider.removeObserverNotice(notice.getObserverId(), notice.getNoticeId()); + pulseDurableProvider.removeNoticeContent(notice.getNoticeId()); + } + } + + /** + * Remove expired notice from NOTICE_CACHE and remove it from store. + */ + private static void cleanCache() { + Iterator>> iterator = NOTICE_CACHE.entrySet().iterator(); + + while (iterator.hasNext()) { + Map.Entry> entry = iterator.next(); + long noticeId = entry.getKey(); + long intoCacheTime = entry.getValue().getValue(); + + if (System.currentTimeMillis() - intoCacheTime >= NOTICE_EXPIRATION_TIME) { + log.info("Cleaning a cached notice: [ {} ]", noticeId); + iterator.remove(); + pulseDurableProvider.removeNoticeContent(noticeId); + } + + } + } + + private static GeneratedMessageV3 getNotice(long noticeId) { + KVPair tsNotice = NOTICE_CACHE.get(noticeId); + if (tsNotice != null) { + return tsNotice.getKey(); + } + + Metapb.NoticeContent noticeContent = pulseDurableProvider.getNotice(noticeId); + if (noticeContent == null) { + log.warn("Failed to get notice content for id: {}", noticeId); + return null; + } + + String className = noticeContent.getNoticeClass(); + if (className == null || className.isEmpty()) { + log.error("Failed to get notice class for id: {}", noticeId); + return null; + } + + ByteString instanceData = noticeContent.getNoticeContent(); + if (instanceData == null || instanceData.isEmpty()) { + log.error("Failed to create a notice, caused by: notice-content is null or empty"); + return null; + } + + GeneratedMessageV3 notice = NoticeParseUtil.parseNotice(instanceData, className); + + if (notice == null) { + log.error("Failed to parse a notice, caused by: parse notice content failed."); + return null; + } + + // KVPair + NOTICE_CACHE.put(noticeId, new KVPair<>(notice, System.currentTimeMillis())); + + return notice; + } + + private static Map> toNoticeObservers(List notices) { + return notices.stream() + .filter(RetryingSwitch::isNotExpired) + 
.filter(RetryingSwitch::isNotBrandNew) + .collect( + Collectors.groupingBy(Metapb.ObserverNotice::getNoticeId, + Collectors.mapping(Metapb.ObserverNotice::getObserverId, Collectors.toList())) + ); + } + + private static boolean isNotExpired(Metapb.ObserverNotice notice) { + if (System.currentTimeMillis() - notice.getTimestamp() < NOTICE_EXPIRATION_TIME) { + return true; + } else { + EXPIRED_BUFFER.offer(notice); + return false; + } + } + + private static boolean isNotBrandNew(Metapb.ObserverNotice notice) { + if (System.currentTimeMillis() - notice.getTimestamp() > RETRYING_PERIOD_MILLISECONDS) { + return true; + } else { + return false; + } + } + + private static Metapb.NoticeContent toNoticeContent(GeneratedMessageV3 notice) { + return Metapb.NoticeContent.newBuilder() + .setNoticeId(IdUtil.createMillisId()) + .setNoticeClass(notice.getClass().getName()) + .setNoticeContent(notice.toByteString()) + .setTimestamp(System.currentTimeMillis()) + .build(); + } + + private static Collection toObserverNotices(long noticeId, Collection< + Long> observerIds) { + long timestamp = System.currentTimeMillis(); + return observerIds.stream() + .map(e -> Metapb.ObserverNotice.newBuilder() + .setObserverId(e) + .setNoticeId(noticeId) + .setTimestamp(timestamp) + .build() + ).collect(Collectors.toList()); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ShardGroupChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ShardGroupChangeSubject.java new file mode 100644 index 0000000000..10a222c7cf --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/ShardGroupChangeSubject.java @@ -0,0 +1,45 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseShardGroupRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseShardGroupResponse; +import 
org.apache.hugegraph.pd.grpc.pulse.PulseType; +import com.google.protobuf.GeneratedMessageV3; + +import java.util.Collection; +import java.util.function.Function; + +/** + * @author lynn.bond@hotmail.com created on 2023/11/08 + */ +class ShardGroupChangeSubject extends AbstractObserverSubject { + + ShardGroupChangeSubject() { + super(PulseType.PULSE_TYPE_SHARD_GROUP_CHANGE); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getShardGroupResponse().toString(); + } + + @Override + Function getNoticeHandler() { + return r -> r.getShardGroupRequest(); + } + +// void notifyClient(PulseShardGroupResponse.Builder responseBuilder) { +// super.notifyClient(b -> b.setShardGroupResponse(responseBuilder)); +// } + + @Override + long notifyClient(GeneratedMessageV3 response, String originId) { + return super.send2Clients(b -> b.setShardGroupResponse((PulseShardGroupResponse) response),originId); + } + + @Override + long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds) { + return super.send2Clients(b -> b.setShardGroupResponse((PulseShardGroupResponse) response),noticeId,observerIds); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/StoreNodeChangeSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/StoreNodeChangeSubject.java new file mode 100644 index 0000000000..11aacbcfea --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/StoreNodeChangeSubject.java @@ -0,0 +1,45 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.grpc.pulse.PulseNodeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseNodeResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import com.google.protobuf.GeneratedMessageV3; + +import java.util.Collection; +import java.util.function.Function; + +/** + * @author 
lynn.bond@hotmail.com created on 2023/11/07 + */ +class StoreNodeChangeSubject extends AbstractObserverSubject { + + StoreNodeChangeSubject() { + super(PulseType.PULSE_TYPE_STORE_NODE_CHANGE); + } + + @Override + String toNoticeString(PulseResponse res) { + return res.getNodeResponse().toString(); + } + + @Override + Function getNoticeHandler() { + return r -> r.getNodeRequest(); + } + +// void notifyClient(PulseNodeResponse.Builder responseBuilder) { +// super.notifyClient(b -> b.setNodeResponse(responseBuilder)); +// } + + @Override + long notifyClient(GeneratedMessageV3 response, String originId) { + return super.send2Clients(b -> b.setNodeResponse((PulseNodeResponse) response),originId); + } + + @Override + long notifyClient(GeneratedMessageV3 response, long noticeId, Collection observerIds) { + return super.send2Clients(b -> b.setNodeResponse((PulseNodeResponse) response),noticeId,observerIds); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/SubjectIndividualObserver.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/SubjectIndividualObserver.java new file mode 100644 index 0000000000..bcad40016d --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/SubjectIndividualObserver.java @@ -0,0 +1,155 @@ +package org.apache.hugegraph.pd.pulse; + +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.grpc.pulse.PulseType; +import org.apache.hugegraph.pd.util.IdUtil; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +import java.util.function.BiConsumer; +import java.util.function.Function; + +/** + * @author lynn.bond@hotmail.com on 2023/11/6 + */ +@Slf4j 
+class SubjectIndividualObserver implements StreamObserver { + private final StreamObserver responseObserver; + private AbstractObserverSubject subject; + private Long observerId; + + private BiConsumer ackConsumer = defaultAckConsumer(); + private Function subjectProvider; + + static SubjectIndividualObserver of(StreamObserver responseObserver + , Function subjectProvider) { + + HgAssert.isArgumentNotNull(responseObserver, "responseObserver"); + HgAssert.isArgumentNotNull(subjectProvider, "subjectProvider"); + + return new SubjectIndividualObserver(responseObserver, subjectProvider); + } + + private SubjectIndividualObserver(StreamObserver responseObserver + , Function subjectProvider) { + + this.responseObserver = responseObserver; + this.subjectProvider = subjectProvider; + } + + public SubjectIndividualObserver setAckConsumer(BiConsumer ackConsumer) { + HgAssert.isArgumentNotNull(ackConsumer, "ackConsumer"); + this.ackConsumer = ackConsumer; + return this; + } + + private BiConsumer defaultAckConsumer() { + return (subject, ack) -> { + log.info("[defaultAckConsumer] Receiving an ack of subject [ {} ], ack: {noticeId={},observerId={}}", + subject.getPulseType(), ack.getNoticeId(), ack.getObserverId()); + }; + } + + private void cancelObserver() { + if (this.observerId == null) { + this.responseObserver.onError(new Exception("Invoke cancel-observer before create-observer.")); + return; + } + + this.subject.removeObserver(this.observerId, this.responseObserver); + } + + private void addObserver(PulseCreateRequest request) { + if (this.subject != null) { + log.warn("Aborted a PulseCreateRequest because the subject already exists."); + return; + } + + PulseType pulseType = request.getPulseType(); + + if (pulseType.equals(PulseType.PULSE_TYPE_UNKNOWN)) { + log.warn("Aborted a PulseCreateRequest because of the unknown pulse type."); + this.responseObserver.onError(new Exception("Unknown pulse type.")); + return; + } + + try { + this.subject = 
this.subjectProvider.apply(pulseType); + } catch (Throwable t) { + log.error("Failed to apply a subject with pulse type [" + pulseType + "], caused by: ", t); + responseObserver.onError(new Exception("Failed to apply a subject with pulse type [" + + pulseType + "], caused by: ", t)); + return; + } + + if (subject == null) { + log.warn("Aborted a PulseCreateRequest because of an unsupported pulse type: [{}]", pulseType); + responseObserver.onError(new Exception("Unsupported pulse type: " + pulseType.name())); + return; + } + + if (request.getObserverId() > 0L) { + /* Accepted the observerId from the PulseCreateRequest, which is greater than 0. */ + this.observerId = request.getObserverId(); + log.info("Accepted observerId: [ {} ].", this.observerId); + } else { + /* Created a new observerId if the id passed in PulseCreateRequest is less than or equal to 0. */ + this.observerId = createObserverId(); + log.info("Created observerId: [ {} ].", this.observerId); + } + + this.subject.addObserver(this.observerId, this.responseObserver); + } + + private void ackNotice(PulseAckRequest ackRequest) { + this.ackConsumer.accept(this.subject, ackRequest); + } + + private void handleNotice(PulseNoticeRequest noticeRequest) { + subject.handleClientNotice(noticeRequest); + } + + private static Long createObserverId() { + return IdUtil.createMillisId(); + } + + @Override + public void onNext(PulseRequest pulseRequest) { + if (pulseRequest.hasCreateRequest()) { + this.addObserver(pulseRequest.getCreateRequest()); + return; + } + + if (pulseRequest.hasCancelRequest()) { + this.cancelObserver(); + return; + } + + if (pulseRequest.hasNoticeRequest()) { + this.handleNotice(pulseRequest.getNoticeRequest()); + } + + if (pulseRequest.hasAckRequest()) { + this.ackNotice(pulseRequest.getAckRequest()); + return; + } + } + + @Override + public void onError(Throwable throwable) { + log.error("Received a client's onError, subject [ {} ], error: {}" + , subject == null ? 
"no subject" : subject.getPulseType(), throwable); + this.cancelObserver(); + } + + @Override + public void onCompleted() { + this.cancelObserver(); + } + +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionInstructionListenerImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionInstructionListenerImpl.java new file mode 100644 index 0000000000..2179b7254f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionInstructionListenerImpl.java @@ -0,0 +1,78 @@ +package org.apache.hugegraph.pd.pulse.impl; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.listener.PartitionInstructionListener; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; +import org.apache.hugegraph.pd.service.PDService; + +import java.util.List; + +public class PartitionInstructionListenerImpl implements PartitionInstructionListener { + + PDService pdService; + public PartitionInstructionListenerImpl(PDService pdService) { + this.pdService = pdService; + } + + private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition) throws PDException { + return PartitionHeartbeatResponse.newBuilder().setPartition(partition) + .setId(pdService.getIdService().getId(PDService.TASK_ID_KEY, 1)); + } + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws PDException { 
+ PDPulseSubjects.notifyClient(getBuilder(partition).setChangeShard(changeShard), getStoreIds(partition.getId())); + } + + @Override + public void transferLeader(Metapb.Partition partition, TransferLeader transferLeader) throws PDException { + PDPulseSubjects.notifyClient(getBuilder(partition).setTransferLeader(transferLeader), + getStoreIds(partition.getId())); + } + + @Override + public void splitPartition(Metapb.Partition partition, SplitPartition splitPartition) throws PDException { + PDPulseSubjects.notifyClient(getBuilder(partition).setSplitPartition(splitPartition), + getStoreIds(partition.getId())); + + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws PDException { + PDPulseSubjects.notifyClient(getBuilder(partition).setDbCompaction(dbCompaction), + getStoreIds(partition.getId())); + + } + + @Override + public void movePartition(Metapb.Partition partition, MovePartition movePartition) throws PDException { + PDPulseSubjects.notifyClient(getBuilder(partition).setMovePartition(movePartition), + getStoreIds(partition.getId())); + } + + @Override + public void cleanPartition(Metapb.Partition partition, CleanPartition cleanPartition) throws PDException { + PDPulseSubjects.notifyClient(getBuilder(partition).setCleanPartition(cleanPartition), + getStoreIds(partition.getId())); + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, PartitionKeyRange partitionKeyRange) + throws PDException { + PDPulseSubjects.notifyClient(getBuilder(partition).setKeyRange(partitionKeyRange), + getStoreIds(partition.getId())); + } + + private List getStoreIds(int partId) throws PDException { + return pdService.getStoreNodeService().getActiveStoresByPartition(partId); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionStatusListenerImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionStatusListenerImpl.java new file mode 100644 
index 0000000000..df094dfb74 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PartitionStatusListenerImpl.java @@ -0,0 +1,22 @@ +package org.apache.hugegraph.pd.pulse.impl; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.listener.PartitionStatusListener; +import org.apache.hugegraph.pd.pulse.ChangeType; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; + + +public class PartitionStatusListenerImpl implements PartitionStatusListener { + @Override + public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + PDPulseSubjects.notifyPartitionChange(ChangeType.ALTER, + partition.getGraphName(), partition.getId()); + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + PDPulseSubjects.notifyPartitionChange(ChangeType.DEL, partition.getGraphName(), + partition.getId()); + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PulseListenerImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PulseListenerImpl.java new file mode 100644 index 0000000000..f2a76ae215 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/PulseListenerImpl.java @@ -0,0 +1,36 @@ +package org.apache.hugegraph.pd.pulse.impl; + +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.service.PDService; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2024/2/27 + **/ +@Slf4j +public class PulseListenerImpl implements PulseListener { + + PDService pdService; + + public PulseListenerImpl(PDService pdService) { + this.pdService = pdService; + } + + @Override + public void onNext(PartitionHeartbeatRequest request) throws Exception { + this.pdService.getPartitionService().partitionHeartbeat(request.getStates()); + } + + @Override + public void onError(Throwable throwable) { + 
log.error("Received an error notice from pd-client", throwable); + } + + @Override + public void onCompleted() { + log.info("Received an completed notice from pd-client"); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/ShardGroupStatusListenerImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/ShardGroupStatusListenerImpl.java new file mode 100644 index 0000000000..19a38c3852 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/ShardGroupStatusListenerImpl.java @@ -0,0 +1,33 @@ +package org.apache.hugegraph.pd.pulse.impl; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.listener.ShardGroupStatusListener; +import org.apache.hugegraph.pd.pulse.ChangeType; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; + +/** + * @author zhangyingjie + * @date 2024/2/27 + **/ +public class ShardGroupStatusListenerImpl implements ShardGroupStatusListener { + @Override + public void onShardListChanged(Metapb.ShardGroup shardGroup, Metapb.ShardGroup newShardGroup) { + if (shardGroup == null && newShardGroup == null) { + return; + } + + // invoked before change, saved to db and update cache. 
+ if (newShardGroup == null) { + PDPulseSubjects.notifyShardGroupChange(ChangeType.DEL, shardGroup.getId(), shardGroup); + } else if (shardGroup == null) { + PDPulseSubjects.notifyShardGroupChange(ChangeType.ADD, newShardGroup.getId(), newShardGroup); + } else { + PDPulseSubjects.notifyShardGroupChange(ChangeType.ALTER, shardGroup.getId(), newShardGroup); + } + } + + @Override + public void onShardListOp(Metapb.ShardGroup shardGroup) { + PDPulseSubjects.notifyShardGroupChange(ChangeType.USER_DEFINED, shardGroup.getId(), shardGroup); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/StoreStatusListenerImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/StoreStatusListenerImpl.java new file mode 100644 index 0000000000..86e7309c2b --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/pulse/impl/StoreStatusListenerImpl.java @@ -0,0 +1,41 @@ +package org.apache.hugegraph.pd.pulse.impl; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; + +public class StoreStatusListenerImpl implements StoreStatusListener { + @Override + public void onStoreStatusChanged(Metapb.Store store, + Metapb.StoreState old, + Metapb.StoreState status) { + StoreNodeEventType type = StoreNodeEventType.STORE_NODE_EVENT_TYPE_UNKNOWN; + if (status == Metapb.StoreState.Up) { + type = StoreNodeEventType.STORE_NODE_EVENT_TYPE_NODE_ONLINE; + } else if (status == Metapb.StoreState.Offline) { + type = StoreNodeEventType.STORE_NODE_EVENT_TYPE_NODE_OFFLINE; + } + PDPulseSubjects.notifyNodeChange(type, "", store.getId()); + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { +// PulseGraphResponse wgr = PulseGraphResponse.newBuilder() +// .setGraph(graph) +// .build(); +// 
PulseResponse.Builder wr = PulseResponse.newBuilder() +// .setGraphResponse(wgr); +// PDPulseSubjects.notifyChange(WatchType.WATCH_TYPE_GRAPH_CHANGE, +// wr); + PDPulseSubjects.notifyGraphChange(graph); + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + PDPulseSubjects.notifyNodeChange(StoreNodeEventType.STORE_NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "", + store.getId()); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java new file mode 100644 index 0000000000..b9b5dc1c72 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -0,0 +1,206 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.PDException; + +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.MessageOrBuilder; +import com.google.protobuf.util.JsonFormat; + +public class API { + + // TODO: use a flexible way to define the version + // refer: https://github.com/apache/hugegraph/pull/2528#discussion_r1573823996 + public static final String VERSION = "1.5.0"; + public static final String PD = "PD"; + public static final String STORE = "STORE"; + public static String STATUS_KEY = "status"; + public static String ERROR_KEY = "error"; + public static String QUOTATION = "\""; + public static String COMMA = ","; + public static String COLON = ": "; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +======== + public static final String VERSION = "4.0.0"; + public static final String PD = "PD"; + public static final String STORE = "STORE"; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java + + public String toJSON(List values, String key) { + + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0") + .append(COMMA) + .append(QUOTATION).append(key).append(QUOTATION).append(COLON) + .append("[ "); + + if (values != null) { + values.forEach(s -> { + try { + builder.append(JsonFormat.printer().print(s)); + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + } + 
builder.append(","); + }); + builder.deleteCharAt(builder.length() - 1); + } + builder.append("]}"); + return builder.toString(); + } + + public String toJSON(MessageOrBuilder value, String key) { + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0") + .append(COMMA) + .append(QUOTATION).append(key).append(QUOTATION).append(COLON); + try { + if (value != null) { + builder.append(JsonFormat.printer().print(value)); + } else { + builder.append("{}"); + } + builder.append("}"); + return builder.toString(); + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + return toJSON(e); + } + + } + + public String toJSON(Map> values) { + StringBuilder builder = new StringBuilder(); + builder.append("{ "); + for (Map.Entry> entry : values.entrySet()) { + String entryKey = entry.getKey(); + List entryValue = entry.getValue(); + builder.append(QUOTATION).append(entryKey).append(QUOTATION).append(COLON).append("["); + if ((entryValue != null) && !(entryValue.isEmpty())) { + entryValue.forEach(s -> { + try { + if (s == null) { + builder.append("null"); + } else { + builder.append(JsonFormat.printer().print(s)); + } + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + } + builder.append(","); + }); + builder.deleteCharAt(builder.length() - 1); + } + builder.append("]").append(COMMA); + } + builder.deleteCharAt(builder.length() - 1); + builder.append("}"); + return builder.toString(); + } + + public String toJSON(PDException exception) { + String builder = "{" + + QUOTATION + STATUS_KEY + QUOTATION + COLON + + exception.getErrorCode() + COMMA + + QUOTATION + ERROR_KEY + QUOTATION + COLON + + QUOTATION + exception.getMessage() + QUOTATION + + "}"; + + return builder; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java + public String toJSON(Exception exception) { + String builder = "{" + + QUOTATION + 
STATUS_KEY + QUOTATION + COLON + "-1" + + COMMA + + QUOTATION + ERROR_KEY + QUOTATION + COLON + + QUOTATION + exception.getMessage() + QUOTATION + + "}"; +======== + public String toJSON(Throwable exception) { + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("-1").append(COMMA) + .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) + .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); + builder.append("}"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java + + return builder; + } + + public String toJSON(Object object) { + ObjectMapper mapper = new ObjectMapper(); + try { + return mapper.writeValueAsString(object); + } catch (JsonProcessingException e) { + e.printStackTrace(); + return e.getMessage(); + } + } + + public Map okMap(String k, Object v) { + Map map = new HashMap<>(); + map.put(STATUS_KEY, 0); + map.put(k, v); + return map; + } + + public String toJSON(List values, + JsonFormat.TypeRegistry registry) { + + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("0") + .append(COMMA) + .append(QUOTATION).append("log").append(QUOTATION).append(COLON) + .append("[ "); + JsonFormat.Printer printer = JsonFormat.printer().usingTypeRegistry(registry); + if (values != null) { + values.forEach(s -> { + try { + builder.append(printer.print(s)); + } catch (InvalidProtocolBufferException e) { + e.printStackTrace(); + } + builder.append(","); + }); + builder.deleteCharAt(builder.length() - 1); + } + builder.append("]}"); + return builder.toString(); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java new file mode 100644 index 0000000000..8eadb9f127 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphAPI.java @@ -0,0 +1,141 @@ +package org.apache.hugegraph.pd.rest; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.servlet.http.HttpServletRequest; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.model.GraphRestRequest; +import org.apache.hugegraph.pd.model.GraphStatistics; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.RestApiResponse; + +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class GraphAPI extends API { + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @GetMapping(value = "/graph/partitionSizeRange", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getPartitionSizeRange() { + try { + int minPartitionSize = 1; + int maxPartitionSize = pdService.getStoreNodeService().getShardGroups().size(); + Map dataMap = new HashMap<>(); + dataMap.put("minPartitionSize", minPartitionSize); + dataMap.put("maxPartitionSize", maxPartitionSize); + return new RestApiResponse(dataMap, ErrorType.OK, 
ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException:", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @GetMapping(value = "/graphs", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getGraphs() { + RestApiResponse response = new RestApiResponse(); + try { + List graphs = pdRestService.getGraphs(); + List resultGraphs = new ArrayList<>(); + for (Metapb.Graph graph : graphs) { + if ((graph.getGraphName() != null) && (graph.getGraphName().endsWith("/g"))) { + resultGraphs.add(new GraphStatistics(graph, pdRestService, pdService)); + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("graphs", resultGraphs); + response.setData(dataMap); + response.setStatus(ErrorType.OK.getNumber()); + response.setMessage(ErrorType.OK.name()); + + } catch (PDException e) { + log.error("PDException: ", e); + response.setData(new HashMap()); + response.setStatus(e.getErrorCode()); + response.setMessage(e.getMessage()); + } + return response; + } + + @PostMapping(value = "/graph/**", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setGraph(@RequestBody GraphRestRequest body, HttpServletRequest request) { + try { + String requestURL = request.getRequestURL().toString(); + final String prefix = "/graph/"; + final int limit = 2; + String graphName = requestURL.split(prefix, limit)[1]; + graphName = URLDecoder.decode(graphName, "utf-8"); + Metapb.Graph curGraph = pdRestService.getGraph(graphName); + Metapb.Graph.Builder builder = + Metapb.Graph.newBuilder(curGraph == null ? 
Metapb.Graph.getDefaultInstance() : curGraph); + builder.setGraphName(graphName); + if (body.getPartitionCount() > 0) { + builder.setPartitionCount(body.getPartitionCount()); + } + + Metapb.Graph newGraph = pdRestService.updateGraph(builder.build()); + return toJSON(newGraph, "graph"); + } catch (PDException exception) { + return toJSON(exception); + } catch (Exception e) { + return toJSON(e); + } + } + + + @GetMapping(value = "/graph/**", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getGraph(HttpServletRequest request) throws UnsupportedEncodingException { + RestApiResponse response = new RestApiResponse(); + GraphStatistics statistics; + String requestURL = request.getRequestURL().toString(); + final String prefix = "/graph/"; + final int limit = 2; + String graphName = requestURL.split(prefix, limit)[1]; + graphName = URLDecoder.decode(graphName, "utf-8"); + try { + Metapb.Graph graph = pdRestService.getGraph(graphName); + if (graph != null) { + statistics = new GraphStatistics(graph, pdRestService, pdService); + response.setData(statistics); + } else { + response.setData(new HashMap()); //没有该图 + } + response.setStatus(ErrorType.OK.getNumber()); + response.setMessage(ErrorType.OK.name()); + } catch (PDException e) { + log.error(e.getMessage()); + response.setData(new HashMap()); + response.setStatus(ErrorType.UNKNOWN.getNumber()); + response.setMessage(e.getMessage()); + } + return response; + } +} \ No newline at end of file diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java new file mode 100644 index 0000000000..5d9bf324c3 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/GraphSpaceAPI.java @@ -0,0 +1,72 @@ +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import 
org.apache.hugegraph.pd.model.GraphSpaceRestRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.*; + +import javax.servlet.http.HttpServletRequest; +import java.net.URLDecoder; +import java.util.List; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class GraphSpaceAPI extends API{ + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/graph-spaces", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getGraphSpaces() { + try { + List graphSpaces = pdRestService.getGraphSpaces(); + return toJSON(graphSpaces, "graph-spaces"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @PostMapping(value = "/graph-spaces/**", consumes = MediaType.APPLICATION_JSON_VALUE, produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setGraphSpace(@RequestBody GraphSpaceRestRequest body, HttpServletRequest request) { + try { + String requestURL = request.getRequestURL().toString(); + String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1]; + graphSpaceName = URLDecoder.decode(graphSpaceName, "utf-8"); + Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder() + .setName(graphSpaceName) + .setStorageLimit(body.getStorageLimit()) + .build(); + Metapb.GraphSpace newGraphSpace = pdRestService.setGraphSpace(graphSpace); + return toJSON(newGraphSpace, "graph-spaces"); + } catch (PDException exception) { + return toJSON(exception); + } catch (Exception e) { + return toJSON(e); + } + } + + @GetMapping(value = "/graph-spaces/**", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getGraphSpace(HttpServletRequest request) { + try { + String requestURL = request.getRequestURL().toString(); + String graphSpaceName = requestURL.split("/graph-spaces/", 2)[1]; 
+ graphSpaceName = URLDecoder.decode(graphSpaceName, "utf-8"); + Metapb.GraphSpace graphSpace = pdRestService.getGraphSpace(graphSpaceName); + return toJSON(graphSpace, "graphs-paces"); + } catch (PDException exception) { + return toJSON(exception); + } catch (Exception e) { + return toJSON(e); + } + } + + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java new file mode 100644 index 0000000000..aabaeaefe9 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @@ -0,0 +1,333 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +======== +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +======== +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +>>>>>>>> d7e3d51dd 
(3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; + +@RestController +@Slf4j +@RequestMapping("/") +public class IndexAPI extends API { + + @Autowired + PDService pdService; + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public BriefStatistics index() throws PDException, ExecutionException, InterruptedException { + + BriefStatistics statistics = new BriefStatistics(); + statistics.leader = RaftEngine.getInstance().getLeaderGrpcAddress(); + statistics.state = getClusterState(); + statistics.storeSize = pdService.getStoreNodeService().getActiveStores().size(); + statistics.graphSize = pdService.getPartitionService().getGraphs().size(); + statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size(); + return statistics; + + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +======== + @Data + class BriefStatistics { + Map state; + String leader; + int memberSize; + int storeSize; + int graphSize; + int partitionSize; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + @GetMapping(value = "/v1/cluster", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse cluster() throws InterruptedException, ExecutionException { + Statistics statistics = new Statistics(); + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + statistics.state = + String.valueOf(pdService.getStoreNodeService().getClusterStats().getState()); + String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); + CallStreamObserverWrap response = + new CallStreamObserverWrap<>(); +======== + statistics.states = getClusterState(); + statistics.state = 
statistics.getStates().get(DEFAULT_STORE_GROUP_ID); + String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + List pdList = new ArrayList<>(); + for (Metapb.Member member : response.get().get(0).getMembersList()) { + Member member1 = new Member(member); + if ((leaderGrpcAddress != null) && + (leaderGrpcAddress.equals(member.getGrpcUrl()))) { + member1.role = "Leader"; + statistics.pdLeader = member1; + } else { + member1.role = "Follower"; + } + pdList.add(member1); + } + statistics.pdList = pdList; + statistics.memberSize = pdList.size(); + List stores = new ArrayList<>(); + for (Metapb.Store store : pdService.getStoreNodeService().getStores()) { + stores.add(new Store(store)); + } + statistics.stores = stores; + statistics.storeSize = statistics.stores.size(); + statistics.onlineStoreSize = pdService.getStoreNodeService().getActiveStores().size(); + statistics.offlineStoreSize = statistics.storeSize - statistics.onlineStoreSize; + List graphs = pdRestService.getGraphs(); + statistics.graphSize = graphs.stream().filter((g) -> (g.getGraphName() != null) + && + (g.getGraphName().endsWith("/g"))) + .count(); + statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size(); + statistics.shardCount = pdService.getConfigService().getPDConfig().getShardCount(); + for (Metapb.Store store : pdService.getStoreNodeService().getStores()) { + List graphStatsList = store.getStats().getGraphStatsList(); + for (Metapb.GraphStats graphStats : graphStatsList) { + statistics.keyCount += graphStats.getApproximateKeys(); + statistics.dataSize += graphStats.getApproximateSize(); + } + } + // Data status: The data status is deduced based on the state of the graph, the + // larger the 
enumeration value, the more serious the problem, and the default is the + // normal state + Metapb.PartitionState dataState = Metapb.PartitionState.PState_Normal; + for (Metapb.Graph graph : pdRestService.getGraphs()) { + if (graph.getState() == Metapb.PartitionState.UNRECOGNIZED) { + // If it is not recognized, it will not participate in the + // comparison, otherwise an exception will be thrown + continue; + } + if ((graph.getState() != null) && + (graph.getState().getNumber() > dataState.getNumber())) { + dataState = graph.getState(); + } + } + statistics.dataState = dataState.name(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + return new RestApiResponse(statistics, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + } catch (PDException e) { +======== + return new RestApiResponse(statistics, ErrorType.OK, ErrorType.OK.name()); + } catch (PDException e){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + log.error("PD Exception: ", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + private Map getClusterState() { + Map state = new HashMap<>(); + for (var entry : pdService.getStoreNodeService().getAllClusterStats().entrySet()) { + state.put(entry.getKey(), String.valueOf(entry.getValue())); + } + return state; + } + + @Data + class BriefStatistics { + + String state; + String leader; + int memberSize; + int storeSize; + int graphSize; + int partitionSize; + } + + @Data + class Store { + + long storeId; + String address; + String raftAddress; + String version; + String state; + long startTimeStamp; + + public Store(Metapb.Store store) { + if (store != null) { + storeId = store.getId(); + address = store.getAddress(); + raftAddress = store.getRaftAddress(); + version = store.getVersion(); + state = String.valueOf(store.getState()); + startTimeStamp = store.getStartTimestamp(); + } + + } + } + + @Data + class Member { 
+ + String raftUrl; + String grpcUrl; + String restUrl; + String state; + String dataPath; + String role; + String serviceName; // Service name, custom attributes + String serviceVersion; // Static definitions + long startTimeStamp; // The time when the process started + + public Member(Metapb.Member member) { + if (member != null) { + raftUrl = member.getRaftUrl(); + grpcUrl = member.getGrpcUrl(); + restUrl = member.getRestUrl(); + state = String.valueOf(member.getState()); + dataPath = member.getDataPath(); + serviceName = grpcUrl + "-PD"; + serviceVersion = VERSION; + startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + } + } + + public Member() { + + } + } + + @Data + class Statistics { + + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + * Cluster status, default of the cluster + */ + String state; + /** + * Data status +======== + * 集群状态, 默认集群的 + */ + String state; + /** + * 集群状态 + */ + Map states; + /** + * 数据状态 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + */ + String dataState; + /** + * pd Cluster members + */ + List pdList; + /** + * pd The leader of the cluster + */ + Member pdLeader; + /** + * pd The size of the cluster + */ + int memberSize; + /** + * stores list + */ + List stores; + /** + * store quantity + */ + int storeSize; + /** + * onlineStore + */ + int onlineStoreSize; + /** + * The number of stores that are offline + */ + int offlineStoreSize; + /** + * The number of graphs + */ + long graphSize; + /** + * The number of partitions + */ + int partitionSize; + /** + * Number of partition replicas + */ + int shardCount; + /** + * The number of keys + */ + long keyCount; + /** + * Amount of data + */ + long dataSize; + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java new file mode 100644 
index 0000000000..8d9634ebed --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @@ -0,0 +1,305 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; +======== +package org.apache.hugegraph.pd.rest; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + +import javax.servlet.http.HttpServletRequest; + +import org.apache.hugegraph.pd.common.Useless; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.PeerRestRequest; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; +======== +import java.util.stream.Collectors; + +import javax.servlet.http.HttpServletRequest; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java 
+======== +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.model.PeerRestRequest; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +import io.grpc.stub.CallStreamObserver; +import io.grpc.stub.StreamObserver; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class MemberAPI extends API { + + //TODO + @Autowired + PDService pdService; + + @GetMapping(value = "/members", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getMembers() throws Exception { + + String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembersAndClusterState(Pdpb.GetMembersRequest.newBuilder().build(), response); + List members = new ArrayList<>(); + Member leader = null; + Map stateCountMap = new HashMap<>(); + Pdpb.MembersAndClusterState membersAndClusterState = response.get().get(0); + for (Metapb.Member member : membersAndClusterState.getMembersList()) { + String stateKey = member.getState().name(); + stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); + Member member1 = new Member(member); + if ((leaderGrpcAddress != null) && (leaderGrpcAddress.equals(member.getGrpcUrl()))) { + leader = member1; + } + member1.role = member.getRole().name(); + members.add(member1); + } + + var stateMap = membersAndClusterState.getStateList().stream() + .collect(Collectors.toMap(Metapb.GroupClusterState::getStoreGroup, + state -> 
String.valueOf(state.getState()))); + + HashMap resultMap = new HashMap<>(); + resultMap.put("state", stateMap.get(DEFAULT_STORE_GROUP_ID)); + resultMap.put("states", stateMap); + resultMap.put("pdList", members); + resultMap.put("pdLeader", leader); + resultMap.put("numOfService", members.size()); + resultMap.put("numOfNormalService", + stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + resultMap.put("stateCountMap", stateCountMap); + return new RestApiResponse(resultMap, ErrorType.OK, ErrorType.OK.name()); + } + + @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String changePeerList(@RequestBody PeerRestRequest body, HttpServletRequest request) { + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + Pdpb.ChangePeerListRequest rpcRequest = + Pdpb.ChangePeerListRequest.newBuilder().setPeerList( + body.getPeerList()).build(); + CountDownLatch latch = new CountDownLatch(1); + final Pdpb.ResponseHeader[] responseHeader = {null}; + StreamObserver observer = + new StreamObserver() { + @Override + public void onNext(Pdpb.getChangePeerListResponse value) { + responseHeader[0] = value.getHeader(); + } + + @Override + public void onError(Throwable t) { + responseHeader[0] = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType( + Pdpb.ErrorType.UNKNOWN).setMessage( + t.getMessage()).build()).build(); + latch.countDown(); + } +======== + ClusterOp.ChangePeerListRequest rpcRequest = ClusterOp.ChangePeerListRequest.newBuilder().setPeerList( + body.getPeerList()).build(); + CountDownLatch latch = new CountDownLatch(1); + final ResponseHeader[] responseHeader = {null}; + StreamObserver observer = new StreamObserver<>() { + @Override + public void onNext(ClusterOp.ChangePeerListResponse value) { + responseHeader[0] = value.getHeader(); + } + + @Override + public void 
onError(Throwable t) { + responseHeader[0] = ResponseHeader.newBuilder().setError( + Errors.newBuilder().setType( + ErrorType.UNKNOWN).setMessage( + t.getMessage()).build()).build(); + latch.countDown(); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + pdService.changePeerList(rpcRequest, observer); + latch.await(); + return toJSON(responseHeader[0], "changeResult"); + } catch (Exception e) { + return toJSON(e); + } + } + + public static class CallStreamObserverWrap extends CallStreamObserver implements + Future> { + + CompletableFuture> future = new CompletableFuture<>(); + List values = new ArrayList<>(); + + @Override + public boolean isReady() { + return false; + } + + @Override + public void setOnReadyHandler(Runnable runnable) { + + } + + @Override + public void disableAutoInboundFlowControl() { + + } + + @Override + public void request(int i) { + + } + + @Override + public void setMessageCompression(boolean b) { + + } + + @Override + public void onNext(V v) { + values.add(v); + } + + @Override + public void onError(Throwable throwable) { + future.completeExceptionally(throwable); + } + + @Override + public void onCompleted() { + future.complete(values); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return future.cancel(mayInterruptIfRunning); + } + + @Override + public boolean isCancelled() { + return future.isCancelled(); + } + + @Override + public boolean isDone() { + return future.isDone(); + } + + @Override + public List get() throws InterruptedException, ExecutionException { + return future.get(); + } + + @Override + public List get(long timeout, TimeUnit unit) throws InterruptedException, + ExecutionException, + TimeoutException { + return future.get(timeout, unit); + } + } + + @Data + class Member { + + String raftUrl; + String grpcUrl; + String restUrl; + String state; + String 
dataPath; + String role; + String replicateState; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + String serviceName; // Service name, custom attributes + String serviceVersion; // Static definitions + long startTimeStamp; // Startup time: temporarily takes the startup time of the process +======== + String serviceName; //服务名称,自定义属性 + String serviceVersion; //静态定义 + long startTimeStamp; //启动时间,暂时取进程的启动时间 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + + public Member(Metapb.Member member) { + if (member != null) { + raftUrl = member.getRaftUrl(); + grpcUrl = member.getGrpcUrl(); + restUrl = member.getRestUrl(); + state = String.valueOf(member.getState()); + dataPath = member.getDataPath(); + serviceName = grpcUrl + "-PD"; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + serviceVersion = VERSION; +======== + serviceVersion = API.VERSION; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java + startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime(); + replicateState = member.getReplicatorState(); + } + + } + + @Useless("delete later") + public Member() { + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java new file mode 100644 index 0000000000..9b8b87e2a6 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java @@ -0,0 +1,516 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.rest; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; +import com.google.protobuf.util.JsonFormat; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java +import org.apache.commons.lang.time.DateFormatUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; +import org.springframework.beans.factory.annotation.Autowired; 
+import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import com.google.protobuf.util.JsonFormat; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class PartitionAPI extends API { + + public static final String DEFAULT_DATETIME_FORMAT = "yyyy-MM-dd HH:mm:ss"; + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/highLevelPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + public RestApiResponse getHighLevelPartitions() { + // Information about multiple graphs under the partition + Map> partitions2GraphsMap = new HashMap<>(); + Map resultPartitionsMap = new HashMap<>(); + // The keyCount of each partition is only taken from the leader + Map partition2KeyCount = new HashMap<>(); + // The dataSize of each partition is only taken from the leader + Map partition2DataSize = new HashMap<>(); + List stores; + Map storesMap = new HashMap<>(); + try { + stores = pdRestService.getStores(""); + } catch (PDException e) { + log.error("getStores error", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + for (Metapb.Store store : stores) { + storesMap.put(store.getId(), store); + List graphStatsList = store.getStats().getGraphStatsList(); + for (Metapb.GraphStats graphStats : graphStatsList) { + // Obtaining Graph Information Saved by a Partition (Only from the Leader) + if (Metapb.ShardRole.Leader != graphStats.getRole()) { + continue; + } + // Calculating the key count of partitions (indiscriminate graphs) + partition2KeyCount.put(graphStats.getPartitionId(), + 
partition2KeyCount.getOrDefault(graphStats.getPartitionId(), + graphStats.getApproximateKeys())); + // The dataSize of the partition is calculated by adding the size of the graph + partition2DataSize.put(graphStats.getPartitionId(), + partition2DataSize.getOrDefault(graphStats.getPartitionId(), + 0L) + + graphStats.getApproximateSize()); + if (partitions2GraphsMap.get(graphStats.getPartitionId()) == null) { + partitions2GraphsMap.put(graphStats.getPartitionId(), + new HashMap()); + } + Map partitionGraphsMap = + partitions2GraphsMap.get(graphStats.getPartitionId()); + partitionGraphsMap.put(graphStats.getGraphName(), new GraphStats(graphStats)); + } + } + // Construct all the information that needs to be returned for the partition + List partitionList = pdRestService.getPartitions(""); + for (Metapb.Partition partition : partitionList) { + // Supplement the startKey and endKey of the partition image + if (partitions2GraphsMap.get(partition.getId()) != null) { + GraphStats graphStats = + partitions2GraphsMap.get(partition.getId()).get(partition.getGraphName()); + if (graphStats != null) { + graphStats.startKey = partition.getStartKey(); + graphStats.endKey = partition.getEndKey(); + } + } + // Construct the overall information of the partition (regardless of the diagram) + if ((resultPartitionsMap.get(partition.getId()) == null) + && (!partition.getGraphName().endsWith("/s")) + ) { + Metapb.PartitionStats partitionStats; + try { + partitionStats = pdRestService.getPartitionStats(partition.getGraphName(), + partition.getId()); + } catch (PDException e) { + log.error("getPartitionStats error", e); + partitionStats = null; + } + // Initialize the partition information + HighLevelPartition resultPartition = + new HighLevelPartition(partition, partitionStats); + resultPartition.keyCount = + partition2KeyCount.getOrDefault(resultPartition.partitionId, 0L); + resultPartition.dataSize = + partition2DataSize.getOrDefault(resultPartition.partitionId, 0L); + for (ShardStats 
shard : resultPartition.shards) { + // Assign values to the address and partition information of the replica + shard.address = storesMap.get(shard.storeId).getAddress(); + shard.partitionId = partition.getId(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java + } + if ((partitionStats != null) && (partitionStats.getLeader() != null)) { + long storeId = partitionStats.getLeader().getStoreId(); + resultPartition.leaderAddress = + storesMap.get(storeId).getAddress(); +======== + if (shard.getRole().equalsIgnoreCase(Metapb.ShardRole.Leader.name())){ + resultPartition.leaderAddress = shard.address; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java + } + resultPartitionsMap.put(partition.getId(), resultPartition); + } + } + // Construct a list of graphs under the partitions to be returned, return only /g, and + // sort by name + for (Map.Entry entry : resultPartitionsMap.entrySet()) { + Integer partitionId = entry.getKey(); + HighLevelPartition currentPartition = resultPartitionsMap.get(partitionId); + Map graphsMap = partitions2GraphsMap + .getOrDefault(partitionId, + new HashMap<>()); // Avoid null pointer exceptions at the back + ArrayList graphsList = new ArrayList<>(); + for (Map.Entry entry1 : graphsMap.entrySet()) { + if (!entry1.getKey().endsWith("/g")) { + continue; // Only the graph of /g is kept + } + String graphName = entry1.getKey(); + GraphStats tmpGraph = graphsMap.get(graphName); + final int postfixLength = 2; + tmpGraph.graphName = tmpGraph.graphName.substring(0, tmpGraph.graphName.length() - + postfixLength); + graphsList.add(tmpGraph); + } + graphsList.sort(Comparator.comparing(o -> o.graphName)); + currentPartition.graphs = graphsList; + } + List resultPartitionList = new ArrayList<>(); + if (!resultPartitionsMap.isEmpty()) { + ArrayList partitionids = new ArrayList(resultPartitionsMap.keySet()); + partitionids.sort((o1, o2) -> 
o1.intValue() - o2.intValue()); + for (Integer partitionId : partitionids) { + resultPartitionList.add(resultPartitionsMap.get(partitionId)); + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("partitions", resultPartitionList); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); + } + + @GetMapping(value = "/partitions", produces = MediaType.APPLICATION_JSON_VALUE) + public RestApiResponse getPartitions() { + try { + List partitions = new ArrayList<>(); + List partitionList = pdRestService.getPartitions(""); + List stores = pdRestService.getStoreStats(false); + // The status of the raft node of the partition + HashMap> raftMap = new HashMap<>(); + + HashMap> shardIndexMap = new HashMap<>(); + String delimiter = "@"; + for (int i = 0; i < stores.size(); i++) { + Metapb.Store store = stores.get(i); + Metapb.StoreStats storeStats = store.getStats(); + HashMap storeRaftStats = new HashMap<>(); + List raftStatsList = storeStats.getRaftStatsList(); + for (int j = 0; j < raftStatsList.size(); j++) { + Metapb.RaftStats raftStats = raftStatsList.get(j); + storeRaftStats.put(raftStats.getPartitionId(), raftStats); + } + + HashMap partitionShardStats = new HashMap<>(); + List graphStatsList = storeStats.getGraphStatsList(); + StringBuilder builder = new StringBuilder(); + for (int j = 0; j < graphStatsList.size(); j++) { + Metapb.GraphStats graphStats = graphStatsList.get(j); + String graphName = graphStats.getGraphName(); + String partitionId = Integer.toString(graphStats.getPartitionId()); + builder.append(graphName).append(delimiter).append(partitionId); + partitionShardStats.put(builder.toString(), graphStats); + builder.setLength(0); + } + raftMap.put(store.getId(), storeRaftStats); + shardIndexMap.put(store.getId(), partitionShardStats); + } + + for (Metapb.Partition pt : partitionList) { + Partition partition = new Partition(pt); + String graphName = partition.getGraphName(); + 
partition.getShards().sort(Comparator.comparing(Shard::getStoreId)); + Metapb.PartitionStats partitionStats = + pdRestService.getPartitionStats(graphName, pt.getId()); + Map shardStats = new HashMap<>(); + if (partitionStats != null) { + String dateTime = DateFormatUtils.format( + partitionStats.getTimestamp(), DEFAULT_DATETIME_FORMAT); + partition.setTimestamp(dateTime); + shardStats = getShardStats(partitionStats); + } + + for (Metapb.Shard shard : pdRestService.getShardList(pt.getId())) { + Map finalShardStats = shardStats; + partition.getShards().add(new Shard() {{ + storeId = Long.toString(shard.getStoreId()); + role = shard.getRole(); + address = pdRestService.getStore( + shard.getStoreId()).getAddress(); + partitionId = partition.getId(); + if (finalShardStats.containsKey(shard.getStoreId())) { + state = finalShardStats.get(shard.getStoreId()).getState().toString(); + progress = finalShardStats.get(shard.getStoreId()).getProgress(); + role = finalShardStats.get(shard.getStoreId()).getRole(); + } + + HashMap storeRaftStats = + raftMap.get(shard.getStoreId()); + if (storeRaftStats != null) { + Metapb.RaftStats raftStats = storeRaftStats.get(partition.getId()); + if (raftStats != null) { + committedIndex = Long.toString(raftStats.getCommittedIndex()); + } + } + }}); + } + + partition.setPartitionStats(partitionStats); + + partitions.add(partition); + } + partitions.sort( + Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId)); + HashMap dataMap = new HashMap<>(); + dataMap.put("partitions", partitions); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); + } catch (PDException e) { + log.error("query metric data error", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @GetMapping(value = "/partitionsAndStats", produces = MediaType.APPLICATION_JSON_VALUE) + public String getPartitionsAndStats() { + //for debug use, return partition && partitionStats + try { + Map> 
graph2Partitions = new HashMap<>(); + Map> graph2PartitionStats = new HashMap<>(); + for (Metapb.Graph graph : pdRestService.getGraphs()) { + List partitionList = new ArrayList<>(); + List partitionStatsList = new ArrayList<>(); + for (Metapb.Partition partition : pdRestService.getPartitions( + graph.getGraphName())) { + Metapb.PartitionStats partitionStats = pdRestService + .getPartitionStats(graph.getGraphName(), partition.getId()); + partitionList.add(partition); + partitionStatsList.add(partitionStats); + } + graph2Partitions.put(graph.getGraphName(), partitionList); + graph2PartitionStats.put(graph.getGraphName(), partitionStatsList); + } + String builder = "{\"partitions\":" + toJSON(graph2Partitions) + + ",\"partitionStats\":" + toJSON(graph2PartitionStats) + "}"; + return builder; + } catch (PDException e) { + log.error("PD exception:" + e); + return toJSON(e); + } + } + + private Map getShardStats(Metapb.PartitionStats partitionStats) { + Map stats = new HashMap<>(); + if (partitionStats.getShardStatsList() != null) { + partitionStats.getShardStatsList().forEach(shardStats -> { + stats.put(shardStats.getStoreId(), shardStats); + }); + } + return stats; + } + + @PostMapping(value = "/partitions/log", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getPartitionLog(@RequestBody TimeRangeRequest request) { + try { + Date dateStart = DateUtil.getDate(request.getStartTime()); + Date dateEnd = DateUtil.getDate(request.getEndTime()); + List changedRecords = + pdRestService.getPartitionLog(dateStart.getTime(), + dateEnd.getTime()); + if (changedRecords != null) { + JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry + .newBuilder().add(ClusterOp.SplitDataRequest.getDescriptor()).build(); + return toJSON(changedRecords, registry); + } else { + return toJSON(new PDException(ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + + 
@GetMapping(value = "/resetPartitionState", produces = MediaType.APPLICATION_JSON_VALUE) + public String resetPartitionState() { + try { + for (Metapb.Partition partition : pdRestService.getPartitions("")) { + pdRestService.resetPartitionState(partition); + } + } catch (PDException e) { + return toJSON(e); + } + return "OK"; + } + + @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Statistics getStatistics() throws PDException, ExecutionException, InterruptedException { + + Statistics statistics = new Statistics(); + int partitionId = -1; + return statistics; + } + + @Data + class Shard { + + String address; + String storeId; + Metapb.ShardRole role; + String state; + int progress; + String committedIndex; + long partitionId; + + } + + @Data + class Partition { + + int id; + long version; + String graphName; + long startKey; + long endKey; + + Metapb.PartitionState workState; + List shards; + String timestamp; + + Partition(Metapb.Partition pt) { + id = pt.getId(); + version = pt.getVersion(); + graphName = pt.getGraphName(); + startKey = pt.getStartKey(); + endKey = pt.getEndKey(); + workState = pt.getState(); + shards = new ArrayList<>(); + } + + public void setPartitionStats(Metapb.PartitionStats stats) { + + } + } + + @Data + class Statistics { + + } + + @Data + class HighLevelPartition { + + int partitionId; + String state; + String leaderAddress; + long keyCount; + long dataSize; + String shardState; + int progress; + long raftTerm; + List graphs; + List shards; + String failureCause = ""; + + HighLevelPartition(Metapb.Partition partition, Metapb.PartitionStats partitionStats) { + partitionId = partition.getId(); + state = String.valueOf(partition.getState()); + if (partitionStats != null) { + raftTerm = partitionStats.getLeaderTerm(); + } + + Metapb.ShardState tmpShardState = Metapb.ShardState.SState_Normal; + if (partitionStats != null) { + shards = new ArrayList<>(); + for (Metapb.ShardStats shardStats : 
partitionStats.getShardStatsList()) { + if ((shardStats.getState() != Metapb.ShardState.UNRECOGNIZED) + && (shardStats.getState().getNumber() > tmpShardState.getNumber())) { + tmpShardState = shardStats.getState(); + progress = shardStats.getProgress(); + } + shards.add(new ShardStats(shardStats)); + } + } else { + shards = new ArrayList<>(); + try { + for (Metapb.Shard shard : pdRestService.getShardList(partition.getId())) { + shards.add(new ShardStats(shard)); + } + } catch (PDException e) { + log.error("get shard list failed, {}", e.getMessage()); + } + } + // Synthesize the state of all replicas and assign a value to shardState + shardState = tmpShardState.name(); + } + } + + @Data + class GraphStats { + + String graphName; + long keyCount; + long startKey; + long endKey; + long dataSize; + String workState; + long partitionId; + + GraphStats(Metapb.GraphStats graphStats) { + graphName = graphStats.getGraphName(); + keyCount = graphStats.getApproximateKeys(); + workState = graphStats.getWorkState().toString(); + dataSize = graphStats.getApproximateSize(); + partitionId = graphStats.getPartitionId(); + } + } + + @Data + class ShardStats { + + long storeId; + String role; + String state; + int progress; + // Extra attributes + long partitionId; + String address; + + ShardStats(Metapb.ShardStats shardStats) { + storeId = shardStats.getStoreId(); + role = String.valueOf(shardStats.getRole()); + state = shardStats.getState().toString(); + progress = shardStats.getProgress(); + } + + ShardStats(Metapb.Shard shard) { + // When there is no initialization method for shardStats + storeId = shard.getStoreId(); + role = String.valueOf(shard.getRole()); + state = Metapb.ShardState.SState_Normal.name(); + progress = 0; + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java new file mode 100644 index 0000000000..5711968262 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -0,0 +1,235 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; + +import javax.servlet.http.HttpServletRequest; + +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import 
org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class RegistryAPI extends API { + + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @PostMapping(value = "/registry", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse register(@RequestBody RegistryRestRequest body, + HttpServletRequest request) { + RegistryRestResponse registryResponse = null; + try { + long interval = Long.valueOf(body.getInterval()).longValue(); + NodeInfo info = NodeInfo.newBuilder().setAppName(body.getAppName()) + .setVersion(body.getVersion()) + .setAddress(body.getAddress()).putAllLabels(body.getLabels()) + .setInterval(interval).build(); + registryResponse = pdRestService.register(info); + } catch (PDException e) { + registryResponse = new RegistryRestResponse(); + registryResponse.setErrorType(ErrorType.UNRECOGNIZED); + registryResponse.setMessage(e.getMessage()); + } catch (PDRuntimeException e) { + registryResponse = new RegistryRestResponse(); + registryResponse.setErrorType(ErrorType.LICENSE_VERIFY_ERROR); + registryResponse.setMessage(e.getMessage()); + } + return registryResponse; + } + + @PostMapping(value = "/registryInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public 
RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body, + HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + boolean labelNotEmpty = body.getLabels() != null && !body.getLabels().isEmpty(); + Query query = Query.newBuilder() + .setAppName(StringUtils.isEmpty(body.getAppName()) ? "" : + body.getAppName()) + .putAllLabels(labelNotEmpty ? body.getLabels() : new HashMap<>()) + .setVersion(StringUtils.isEmpty(body.getVersion()) ? "" : + body.getVersion()) + .build(); + ArrayList registryResponse = pdRestService.getNodeInfo(query); + response.setErrorType(ErrorType.OK); + response.setData(registryResponse); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + @GetMapping(value = "/allInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse allInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + //1.normal registry + Query query = + Query.newBuilder().setAppName("").putAllLabels(new HashMap<>()).setVersion("") + .build(); + ArrayList registryResponse = pdRestService.getNodeInfo(query); + //2.pd member + LinkedList pdMembers = getMembers(); + //3.store member + List stores = pdRestService.getStores(""); + LinkedList storeMembers = new LinkedList<>(); + for (Metapb.Store store : stores) { + RegistryRestRequest restRequest = new RegistryRestRequest(); + restRequest.setAddress(store.getAddress()); + restRequest.setVersion(store.getVersion()); + restRequest.setAppName(STORE); + restRequest.setId(String.valueOf(store.getId())); + storeMembers.add(restRequest); + } + response.setErrorType(ErrorType.OK); + HashMap result = new HashMap<>(); + result.put("other", registryResponse); + result.put(PD, pdMembers); + result.put(STORE, storeMembers); + 
response.setData(result); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + private LinkedList getMembers() throws Exception { + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + LinkedList members = new LinkedList<>(); + List membersList = response.get().get(0).getMembersList(); + for (Metapb.Member member : membersList) { + RegistryRestRequest restRequest = new RegistryRestRequest(); + restRequest.setAddress(member.getRestUrl()); + restRequest.setVersion(VERSION); + restRequest.setAppName(PD); + members.add(restRequest); + } + return members; + } + + @GetMapping(value = "/license", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getLicenseInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java + response.setErrorType(Pdpb.ErrorType.OK); + // TODO: uncomment later + //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + //response.setData(licenseVerifierService.getContext()); +======== + response.setErrorType(ErrorType.OK); + LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + response.setData(licenseVerifierService.getContext()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + @GetMapping(value = "/license/machineInfo", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = 
MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RegistryRestResponse getLicenseMachineInfo(HttpServletRequest request) { + RegistryRestResponse response = new RegistryRestResponse(); + try { + response.setErrorType(ErrorType.OK); + LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + response.setData(licenseVerifierService.getIpAndMac()); + } catch (Exception e) { + log.warn(e.getMessage()); + response.setErrorType(ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/SDConfigAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/SDConfigAPI.java new file mode 100644 index 0000000000..cfcad069ea --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/SDConfigAPI.java @@ -0,0 +1,84 @@ +package org.apache.hugegraph.pd.rest; + +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import org.apache.hugegraph.pd.service.SDConfigService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.http.ResponseEntity; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.springframework.web.bind.annotation.RestController; + +import 
org.apache.hugegraph.pd.model.SDConfig; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + * service discovery config for prometheus + */ +@RestController +@Slf4j +@RequestMapping("/v1/prom") +public class SDConfigAPI { + + @Autowired + private SDConfigService service; + + @GetMapping(value = "/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromTargets( + @PathVariable(value = "appName", required = true) String appName) { + return ResponseEntity.of(Optional.ofNullable(this.service.getTargets(appName))); + } + + @GetMapping(value = "/targets-all", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getPromAllTargets() { + return ResponseEntity.of(Optional.ofNullable(this.service.getAllTargets())); + } + + @GetMapping(value = "/demo/targets/{appName}", produces = MediaType.APPLICATION_JSON_VALUE) + public List getDemoTargets( + @PathVariable(value = "appName", required = true) String targetType) { + + SDConfig model = null; + switch (targetType) { + case "node": + model = SDConfig.of() + .addTarget("10.14.139.26:8100") + .addTarget("10.14.139.27:8100") + .addTarget("10.14.139.28:8100") + .setMetricsPath("/metrics") + .setScheme("http"); + break; + case "store": + model = SDConfig.of() + .addTarget("172.20.94.98:8521") + .addTarget("172.20.94.98:8522") + .addTarget("172.20.94.98:8523") + .setMetricsPath("/actuator/prometheus") + .setScheme("http"); + break; + case "pd": + model = SDConfig.of() + .addTarget("172.20.94.98:8620") + .setMetricsPath("/actuator/prometheus"); + + break; + default: + } + return Collections.singletonList(model); + } + + @GetMapping(value = "/sd_config", produces = MediaType.APPLICATION_JSON_VALUE) + public ResponseEntity> getSDConfig(@RequestParam(value = "appName") String appName, + @RequestParam(value = "path", required = false) + String path) { + return ResponseEntity.of(Optional.ofNullable(this.service.getConfigs(appName, path))); + } 
+ +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java new file mode 100644 index 0000000000..5c531142b3 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class ShardAPI extends API { + + @Autowired + PDRestService pdRestService; + @Autowired + PDService pdService; + + @GetMapping(value = "/shards", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getShards() { + try { + List resultShardList = new ArrayList<>(); + 
List graphs = pdRestService.getGraphs(); + for (Metapb.Graph graph : graphs) { + String graphName = graph.getGraphName(); + List partitions = pdRestService.getPartitions(graphName); + for (Metapb.Partition pt : partitions) { + Metapb.PartitionStats partitionStats = + pdRestService.getPartitionStats(graphName, pt.getId()); + if (partitionStats != null) { + List shardStatsList = partitionStats.getShardStatsList(); + for (Metapb.ShardStats shardStats : shardStatsList) { + Shard resultShard = new Shard(); + resultShard.storeId = shardStats.getStoreId(); + resultShard.partitionId = pt.getId(); + resultShard.role = String.valueOf(shardStats.getRole()); + resultShard.state = String.valueOf(shardStats.getState()); + resultShard.graphName = graphName; + resultShard.progress = shardStats.getProgress(); + resultShardList.add(resultShard); + } + } else { + List shardList = new ArrayList<>(); + var shardGroup = pdService.getStoreNodeService().getShardGroup(pt.getId()); + if (shardGroup != null) { + shardList = shardGroup.getShardsList(); + } else { + log.error( + "ShardAPI.getShards(), get shards of group id: {} returns " + + "null.", + pt.getId()); + } + + for (Metapb.Shard shard : shardList) { + Shard resultShard = new Shard(); + resultShard.storeId = shard.getStoreId(); + resultShard.partitionId = pt.getId(); + resultShard.role = String.valueOf(shard.getRole()); + resultShard.state = String.valueOf(Metapb.ShardState.SState_Normal); + resultShard.graphName = graphName; + resultShard.progress = 0; + resultShardList.add(resultShard); + } + } + } + } + HashMap dataMap = new HashMap<>(); + dataMap.put("shards", resultShardList); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException: ", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @Data + class Shard { + + long storeId; + long partitionId; + String role; + String state; + String graphName; + int progress; + } +} diff 
--git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java new file mode 100644 index 0000000000..304858876f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java @@ -0,0 +1,420 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.StoreRestRequest; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.DeleteMapping; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + 
+import com.google.protobuf.util.JsonFormat; + +import lombok.Data; +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1") +public class StoreAPI extends API { + + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/stores", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getStores() { + List storeStatsList = new ArrayList<>(); + try { + HashMap dataMap = new HashMap<>(); + Map stateCountMap = new HashMap<>(); + for (Metapb.Store store : pdRestService.getStores("")) { + String stateKey = store.getState().name(); + stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); + storeStatsList.add(new StoreStatistics(store)); + } + storeStatsList.sort(Comparator.comparing(o -> o.address)); + dataMap.put("stores", storeStatsList); + dataMap.put("numOfService", storeStatsList.size()); + dataMap.put("numOfNormalService", + stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); + dataMap.put("stateCountMap", stateCountMap); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); + } catch (PDException e) { + log.error("PDException", e); + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + // Only storeState can be modified through this API + @PostMapping(value = "/store/{storeId}", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String setStore(@PathVariable long storeId, @RequestBody StoreRestRequest request) { + try { + Metapb.Store lastStore = 
pdRestService.getStore(storeId); + if (lastStore != null) { + Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore); + Metapb.StoreState storeState = Metapb.StoreState.valueOf(request.getStoreState()); + builder.setState(storeState); + Metapb.Store newStore = pdRestService.updateStore(builder.build()); + return toJSON(newStore, "store"); + } else { + return toJSON(new PDException(ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + @GetMapping(value = "/shardGroups", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getShardGroups() { + try { + return toJSON(pdRestService.getShardGroups(), "shardGroups"); + } catch (PDException e) { + return toJSON(e); + } + } + + /** + * Returns the leader on each store + * + * @return + */ + @GetMapping(value = "/shardLeaders") + public Map> shardLeaders() throws PDException { + Map> leaders = new HashMap<>(); + try { + + List groups = pdRestService.getShardGroups(); + groups.forEach(group -> { + group.getShardsList().forEach(shard -> { + if (shard.getRole() == Metapb.ShardRole.Leader) { + try { + String ip = pdRestService.getStore(shard.getStoreId()).getRaftAddress(); + if (!leaders.containsKey(ip)) { + leaders.put(ip, new ArrayList<>()); + } + leaders.get(ip).add(group.getId()); + } catch (PDException e) { + throw new RuntimeException(e); + } + } + }); + }); + } catch (PDException e) { + throw e; + } + return leaders; + } + + @GetMapping(value = "/balanceLeaders") + public Map balanceLeaders() throws PDException { + return pdRestService.balancePartitionLeader(); + } + + @DeleteMapping(value = "/store/{storeId}") + public String removeStore(@PathVariable(value = "storeId") Long storeId) { + try { + pdRestService.removeStore(storeId); + } catch (PDException e) { + return e.getStackTrace().toString(); + } + return "OK"; + } + + @PostMapping(value = "/store/log", consumes = MediaType.APPLICATION_JSON_VALUE, + produces = 
MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getStoreLog(@RequestBody TimeRangeRequest request) { + try { + Date dateStart = DateUtil.getDate(request.getStartTime()); + Date dateEnd = DateUtil.getDate(request.getEndTime()); + List changedStore = + pdRestService.getStoreStatusLog(dateStart.getTime(), + dateEnd.getTime()); + if (changedStore != null) { + JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry + .newBuilder().add(Metapb.Store.getDescriptor()).build(); + return toJSON(changedStore, registry); + } else { + return toJSON(new PDException(ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + @GetMapping(value = "store/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getStore(@PathVariable long storeId) { + // Get the statistics of the store + Metapb.Store store = null; + try { + store = pdRestService.getStore(storeId); + } catch (PDException e) { + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + if (store != null) { + StoreStatistics resultStoreStats = new StoreStatistics(store); + return new RestApiResponse(resultStoreStats, ErrorType.OK, ErrorType.OK.name()); + } else { + return new RestApiResponse(null, ErrorType.STORE_ID_NOT_EXIST, ErrorType.STORE_ID_NOT_EXIST.name()); + } + } + + @GetMapping(value = "storesAndStats", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getStoresAndStats() { + //for debug use + try { + List stores = 
pdRestService.getStores(""); + return toJSON(stores, "stores"); + } catch (PDException e) { + log.error("PD exception:" + e); + return toJSON(e); + } + } + + @GetMapping(value = "store_monitor/json/{storeId}", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public RestApiResponse getStoreMonitorData(@PathVariable long storeId) { + try { + List> result = pdRestService.getMonitorData(storeId); + return new RestApiResponse(result, ErrorType.OK, ErrorType.OK.name()); + } catch (PDException e) { + return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); + } + } + + @GetMapping(value = "store_monitor/{storeId}") + @ResponseBody + public String getStoreMonitorDataText(@PathVariable long storeId) { + try { + return pdRestService.getMonitorDataText(storeId); + } catch (PDException e) { + return "error:" + e.getErrorCode() + e.getMessage(); + } + } + + @GetMapping(value = "/shardGroupsCache", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getShardGroupsCache() { + return toJSON(new ArrayList<>(pdRestService.getShardGroupCache().values()), "shardGroups"); + } + + @Data + class Partition { + + int partitionId; + String graphName; + String role; // shard role + String workState; + long dataSize; // The amount of storage space occupied + + Partition() { + } + + Partition(Metapb.GraphStats graphStats) { + partitionId = graphStats.getPartitionId(); + graphName = graphStats.getGraphName(); + final int postfixLength = 2; + graphName = graphName.substring(0, graphName.length() - postfixLength); + role = String.valueOf(graphStats.getRole()); + workState = 
String.valueOf(graphStats.getWorkState()); + dataSize = graphStats.getApproximateSize(); + } + } + + @Data + class StoreStatistics { + + // store statistics + String storeId; + String address; + String raftAddress; + String version; + String state; + String deployPath; + String dataPath; // The path where the data is stored + long startTimeStamp; + // For the time being, the time of the first heartbeat is taken as the registration time + long registedTimeStamp; + long lastHeartBeat; // Last heartbeat time + long capacity; + long available; + int partitionCount; + int graphSize; + long keyCount; + int storeGroupId; + long leaderCount; // shard role = 'Leader' The number of partitions + String serviceName; + String serviceVersion; + long serviceCreatedTimeStamp; // The time when the service was created + List partitions; + + StoreStatistics(Metapb.Store store) { + if (store != null) { + storeId = String.valueOf(store.getId()); + address = store.getAddress(); + raftAddress = store.getRaftAddress(); + state = String.valueOf(store.getState()); + version = store.getVersion(); + deployPath = store.getDeployPath(); + final String prefix = "file:"; + if ((deployPath != null) && (deployPath.startsWith(prefix))) { + // Remove the prefix + deployPath = deployPath.substring(prefix.length()); + } + if ((deployPath != null) && (deployPath.contains(".jar"))) { + // Remove the information after the jar package + deployPath = deployPath.substring(0, 
deployPath.indexOf(".jar") + 4); + } + dataPath = store.getDataPath(); + startTimeStamp = store.getStartTimestamp(); + try { + serviceCreatedTimeStamp = pdRestService.getStore(store.getId()) + .getStats() + .getStartTime(); // Instance time + final int base = 1000; + serviceCreatedTimeStamp *= base; // Translates to milliseconds + } catch (PDException e) { + e.printStackTrace(); + serviceCreatedTimeStamp = store.getStartTimestamp(); + } + registedTimeStamp = store.getStartTimestamp(); // Time of registration + lastHeartBeat = store.getLastHeartbeat(); + capacity = store.getStats().getCapacity(); + available = store.getStats().getAvailable(); + partitionCount = store.getStats().getPartitionCount(); + serviceName = address + "-store"; + serviceVersion = store.getVersion(); + try { + storeGroupId = pdRestService.getStoreGroupId(store.getId()); + } catch (PDException e) { + storeGroupId = -1; + log.error("get store group id failed,", e); + } + List graphStatsList = store.getStats().getGraphStatsList(); + // Save the partition information + List partitionStatsList = new ArrayList<>(); + // The number used for the chart + HashSet graphNameSet = new HashSet<>(); + // Statistics on the number of partitions of the leader + HashSet leaderPartitionIds = new HashSet(); + // Construct partition information (graph information stored in the store) + Map partition2KeyCount = new HashMap<>(); + for (Metapb.GraphStats graphStats : graphStatsList) { + String graphName = graphStats.getGraphName(); + // Only the part in front of /g /m /s is retained in the title + final int postfixLength = 2; + graphNameSet.add(graphName.substring(0, graphName.length() - postfixLength)); + if ((graphStats.getGraphName() != null) && + (graphStats.getGraphName().endsWith("/g"))) { + Partition pt = new Partition(graphStats); + partitionStatsList.add(pt); + } + // Count the keyCount of each partition + partition2KeyCount.put(graphStats.getPartitionId(), + graphStats.getApproximateKeys()); + if 
(graphStats.getRole() == Metapb.ShardRole.Leader) { + leaderPartitionIds.add(graphStats.getPartitionId()); + } + } + for (Map.Entry entry : partition2KeyCount.entrySet()) { + keyCount += entry.getValue(); + } + partitions = partitionStatsList; + graphSize = graphNameSet.size(); + leaderCount = leaderPartitionIds.size(); + } + + } + } + + @GetMapping(value = "/health", produces = MediaType.TEXT_PLAIN_VALUE) + public Serializable checkHealthy() { + return ""; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java new file mode 100644 index 0000000000..2820b8648f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; + +import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.service.PDRestService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import lombok.extern.slf4j.Slf4j; + +@RestController +@Slf4j +@RequestMapping("/v1/task") +public class TaskAPI extends API { + + @Autowired + PDRestService pdRestService; + + @GetMapping(value = "/patrolStores", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String patrolStores() { + try { + List stores = pdRestService.patrolStores(); + return toJSON(stores, "stores"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/patrolPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String patrolPartitions() { + try { + List partitions = pdRestService.patrolPartitions(); + return toJSON(partitions, "partitions"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/balancePartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public Map> 
balancePartitions() { + try { + Map> partitions = pdRestService.balancePartitions(); + return partitions; + } catch (PDException e) { + e.printStackTrace(); + return null; + } + } + + @GetMapping(value = "/splitPartitions", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String splitPartitions() { + try { + List partitions = pdRestService.splitPartitions(); + return toJSON(partitions, "partitions"); + } catch (PDException e) { + e.printStackTrace(); + return toJSON(e); + } + } + + @GetMapping(value = "/balanceLeaders") + public Map balanceLeaders() throws PDException { + return pdRestService.balancePartitionLeader(); + } + + @GetMapping(value = "/compact") + public String dbCompaction() throws PDException { + pdRestService.dbCompaction(); + return "compact ok"; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java new file mode 100644 index 0000000000..39f97ea51a --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TestAPI.java @@ -0,0 +1,192 @@ +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionResponse; +import org.apache.hugegraph.pd.grpc.pulse.PdInstructionType; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.PulseStore; +import org.apache.hugegraph.pd.pulse.ChangeType; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; +import com.google.protobuf.InvalidProtocolBufferException; +import 
com.google.protobuf.Parser; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PutMapping; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.ResponseBody; +import org.springframework.web.bind.annotation.RestController; + +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +/** + * @author lynn.bond@hotmail.com on 2022/2/9 + */ +@RestController +@Slf4j +@RequestMapping("/test") +public class TestAPI { + + @Autowired + private PDConfig pdConfig; + + @GetMapping(value = "/discovery/{appName}", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String discovery(@PathVariable(value = "appName", required = true) String appName) { + RegistryService register = new RegistryService(pdConfig); + // Query query=Query.newBuilder().setAppName("hugegraph").build(); + AtomicLong label = new AtomicLong(); + HashMap labels = new HashMap<>(); + String labelValue = String.valueOf(label.incrementAndGet()); + //labels.put("address",labelValue); + Query query = Query.newBuilder().build(); + // Query query = Query.newBuilder().setAppName("hugegraph").set.build(); + + return register.getNodes(query).toString(); + } + + @GetMapping(value = "/pulse", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyClient() { + PDPulseSubjects.notifyClient(getPartitionHeartbeatResponse()); + return "PartitionHeartbeatResponse"; + } + + @GetMapping(value = "/pulse/1", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyClientTo1() { + return String.valueOf(PDPulseSubjects.notifyClient(getPartitionHeartbeatResponse(), 1L)); + } + + @GetMapping(value = "/pulse/12", produces = 
MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyClientTo12() { + return String.valueOf(PDPulseSubjects.notifyClient(getPartitionHeartbeatResponse(), List.of(1L, 2L))); + } + + @GetMapping(value = "/pulse/123", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyClientTo123() { + return String.valueOf(PDPulseSubjects.notifyClient(getPartitionHeartbeatResponse(), List.of(1L, 2L, 3L))); + } + + private PartitionHeartbeatResponse.Builder getPartitionHeartbeatResponse() { + return PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(8) + .setGraphName("graphName8")) + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(8) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(8) + .setStoreId(8) + ) + ); + + } + + @GetMapping(value = "/partition", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String noticePartition() { + PDPulseSubjects.notifyPartitionChange(ChangeType.ALTER, "graph-test", 99); + return "partition"; + } + + @GetMapping(value = "/shard-group", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String noticeShardGroup() { + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder().setId(88).build(); + PDPulseSubjects.notifyShardGroupChange(ChangeType.ALTER, 88, group); + + return group.toString(); + } + + @GetMapping(value = "/node", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyNodeChange() { + PDPulseSubjects.notifyNodeChange( + StoreNodeEventType.STORE_NODE_EVENT_TYPE_NODE_RAFT_CHANGE + , "graph-test", 77 + ); + + return "notifyNodeChange( STORE_NODE_EVENT_TYPE_NODE_RAFT_CHANGE, graph-test, 77)"; + } + + @GetMapping(value = "/graph", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String notifyGraphChange() { + Metapb.Graph graph = Metapb.Graph.newBuilder().setGraphName("graph-meta").build(); + PDPulseSubjects.notifyGraphChange(graph); + + return graph.toString(); + } + + 
@PutMapping(value = "/queue", produces = MediaType.TEXT_PLAIN_VALUE) + @ResponseBody + public String testPutQueue() { + this.putQueue(); + return "queue"; + } + + public void putQueue() { + PartitionHeartbeatResponse response = PartitionHeartbeatResponse.newBuilder() + .setPartition(Metapb.Partition.newBuilder() + .setId(9) + .setGraphName("graphName")) + .setChangeShard( + ChangeShard.newBuilder() + .setChangeTypeValue(9) + .addShard(Metapb.Shard.newBuilder() + .setRoleValue(9) + .setStoreId(9) + ) + ).build(); + + Metapb.QueueItem.Builder builder = Metapb.QueueItem.newBuilder() + .setItemId("item-id") + .setItemClass("item-class") + .setItemContent(response.toByteString()); + + + PulseStore store = MetadataFactory.newPulseStore(pdConfig); + + try { + store.addItem(builder.setItemId("item-id-1").build()); + store.addItem(builder.setItemId("item-id-2").build()); + store.addItem(builder.setItemId("item-id-3").build()); + } catch (PDException e) { + e.printStackTrace(); + } + List queue = null; + try { + queue = store.getQueue(); + } catch (PDException e) { + e.printStackTrace(); + } + Parser parser = PartitionHeartbeatResponse.parser(); + + queue.stream().forEach(e -> { + PartitionHeartbeatResponse buf = null; + try { + buf = parser.parseFrom(e.getItemContent()); + } catch (InvalidProtocolBufferException ex) { + ex.printStackTrace(); + } + PDPulseSubjects.notifyClient(PartitionHeartbeatResponse.newBuilder(buf)); + }); + + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/AuthenticationConfigurer.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/AuthenticationConfigurer.java new file mode 100644 index 0000000000..4ac87b308f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/AuthenticationConfigurer.java @@ -0,0 +1,24 @@ +package org.apache.hugegraph.pd.rest.interceptor; + +import org.springframework.beans.factory.annotation.Autowired; +import 
org.springframework.context.annotation.Configuration; +import org.springframework.web.servlet.config.annotation.InterceptorRegistry; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; + +/** + * @author zhangyingjie + * @date 2023/5/5 + **/ +@Configuration +public class AuthenticationConfigurer implements WebMvcConfigurer { + + @Autowired + RestAuthentication restAuthentication; + + @Override + public void addInterceptors(InterceptorRegistry registry) { + registry.addInterceptor(restAuthentication) + .addPathPatterns("/**") + .excludePathPatterns("/actuator/*", "/v1/health"); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/RestAuthentication.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/RestAuthentication.java new file mode 100644 index 0000000000..0eff081430 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/interceptor/RestAuthentication.java @@ -0,0 +1,64 @@ +package org.apache.hugegraph.pd.rest.interceptor; + +import java.io.IOException; +import java.util.function.Function; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.service.interceptor.Authentication; +import org.springframework.lang.Nullable; +import org.springframework.stereotype.Service; +import org.springframework.web.servlet.HandlerInterceptor; +import org.springframework.web.servlet.ModelAndView; + +import org.apache.hugegraph.pd.rest.API; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2023/4/28 + **/ +@Slf4j +@Service +public class RestAuthentication extends Authentication implements HandlerInterceptor { + + private static final String TOKEN_KEY = "Pd-Token"; + + @Override + public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) throws + IOException { + try { + String token = 
request.getHeader(TOKEN_KEY); + String authority = request.getHeader("Authorization"); + if (authority == null) { + throw new Exception("Unauthorized!"); + } + Function tokenCall = t -> { + if (!StringUtils.isEmpty(t)) { + response.addHeader(TOKEN_KEY, t); + } + return true; + }; + authority = authority.replace("Basic ", ""); + return authenticate(authority, token, tokenCall); + } catch (Exception e) { + response.setContentType("application/json"); + response.getWriter().println(new API().toJSON(e)); + response.getWriter().flush(); + return false; + } + } + + @Override + public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, @Nullable + ModelAndView modelAndView) { + } + + @Override + public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, + @Nullable Exception ex) { + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java new file mode 100644 index 0000000000..dcc4f873b7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/DiscoveryService.java @@ -0,0 +1,119 @@ +package org.apache.hugegraph.pd.service; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; + +import javax.annotation.PostConstruct; + +import org.apache.commons.lang3.StringUtils; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc; +import 
org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ +@Slf4j +@GRpcService(interceptors = {GrpcAuthentication.class}) +public class DiscoveryService extends DiscoveryServiceGrpc.DiscoveryServiceImplBase implements ServiceGrpc { + + private static final String CORES = "cores"; + private static AtomicLong id = new AtomicLong(); + private RegistryService register = null; + private LicenseVerifierService licenseVerifierService; + @Autowired + private PDConfig pdConfig; + + @PostConstruct + public void init() { + RaftEngine.getInstance().init(pdConfig.getRaft()); + RaftEngine.getInstance().addStateListener(this); + register = new RegistryService(pdConfig); + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + + @Override + public void register(NodeInfo request, io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(DiscoveryServiceGrpc.getRegisterMethod(), request, observer); + return; + } + int outTimes = pdConfig.getDiscovery().getHeartbeatOutTimes(); + RegisterInfo registerInfo; + try { + if (request.getAppName().equals("hg")) { + Query queryRequest = Query.newBuilder().setAppName(request.getAppName()) + .setVersion(request.getVersion()).build(); + NodeInfos nodes = register.getNodes(queryRequest); + String address = request.getAddress(); + int nodeCount = nodes.getInfoCount() + 1; + for (NodeInfo node : nodes.getInfoList()) { + if (node.getAddress().equals(address)) { + nodeCount = nodes.getInfoCount(); + break; + } + } + Map labelsMap = request.getLabelsMap(); + String coreCount = 
labelsMap.get(CORES); + if (StringUtils.isEmpty(coreCount)) { + throw new PDException(-1, "core count can not be null"); + } + int core = Integer.parseInt(coreCount); + licenseVerifierService.verify(core, nodeCount); + } + register.register(request, outTimes); + String valueId = request.getId(); + registerInfo = RegisterInfo.newBuilder().setNodeInfo(NodeInfo.newBuilder().setId( + "0".equals(valueId) ? String.valueOf(id.incrementAndGet()) : valueId).build()).build(); + + } catch (PDException e) { + registerInfo = RegisterInfo.newBuilder().setHeader(getResponseHeader(e)).build(); + log.debug("registerStore exception: ", e); + } catch (PDRuntimeException ex) { + Errors error = Errors.newBuilder().setTypeValue(ex.getErrorCode()) + .setMessage(ex.getMessage()).build(); + ResponseHeader header = ResponseHeader.newBuilder().setError(error).build(); + registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); + log.debug("registerStore exception: ", ex); + } catch (Exception e) { + Errors error = Errors.newBuilder().setTypeValue(ErrorType.UNKNOWN.getNumber()) + .setMessage(e.getMessage()).build(); + ResponseHeader header = ResponseHeader.newBuilder().setError(error).build(); + registerInfo = RegisterInfo.newBuilder().setHeader(header).build(); + } + observer.onNext(registerInfo); + observer.onCompleted(); + } + + public void getNodes(Query request, io.grpc.stub.StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(DiscoveryServiceGrpc.getGetNodesMethod(), request, responseObserver); + return; + } + responseObserver.onNext(register.getNodes(request)); + responseObserver.onCompleted(); + } + + @Override + public void onRaftLeaderChanged() { + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java new file mode 100644 index 0000000000..05596bd3b7 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -0,0 +1,628 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.service; + +======== +package org.apache.hugegraph.pd.service; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import javax.annotation.PostConstruct; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.kv.K; +import org.apache.hugegraph.pd.grpc.kv.KResponse; +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.KvResponse; +import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc; +import org.apache.hugegraph.pd.grpc.kv.LockRequest; +import org.apache.hugegraph.pd.grpc.kv.LockResponse; +import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import org.apache.hugegraph.pd.grpc.kv.TTLRequest; +import org.apache.hugegraph.pd.grpc.kv.TTLResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchRequest; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchState; +import org.apache.hugegraph.pd.grpc.kv.WatchType; +import org.apache.hugegraph.pd.raft.RaftEngine; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +import 
org.apache.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.watch.KvWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import io.grpc.ManagedChannel; +======== +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; +import org.apache.hugegraph.pd.watch.KvWatchSubject; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * The core implementation class of KV storage + */ +@Slf4j +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +@GRpcService +public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, + ServiceGrpc { +======== +@GRpcService(interceptors = {GrpcAuthentication.class}) +public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements ServiceGrpc { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java + + private final ManagedChannel channel = null; + KvService kvService; + AtomicLong count = new AtomicLong(); + String msg = "node is not leader,it is necessary to redirect to the leader on the client"; + @Autowired + private PDConfig pdConfig; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== + KvService kvService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java + private KvWatchSubject subjects; + private ScheduledExecutorService executor; + + @PostConstruct + public void init() { + RaftEngine.getInstance().init(pdConfig.getRaft()); + RaftEngine.getInstance().addStateListener(this); + kvService = new KvService(pdConfig); + subjects = new KvWatchSubject(pdConfig); 
+ executor = Executors.newScheduledThreadPool(1); + executor.scheduleWithFixedDelay(() -> { + if (isLeader()) { + subjects.keepClientAlive(); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java + }, 0, KvWatchSubject.WATCH_TTL / 2, TimeUnit.MILLISECONDS); +======== + }, 0, KvWatchSubject.WATCH_TTL * 1 / 3, TimeUnit.MILLISECONDS); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java + } + + /** + * Ordinary put + * + * @param request + * @param responseObserver + */ + @Override + public void put(Kv request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + String value = request.getValue(); + this.kvService.put(key, value); + WatchKv watchKV = getWatchKv(key, value); + subjects.notifyAllObserver(key, WatchType.Put, new WatchKv[]{watchKV}); + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Ordinary get + * + * @param request + * @param responseObserver + */ + @Override + public void get(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver); + return; + } + KResponse response; + KResponse.Builder builder = KResponse.newBuilder(); + try { + String value = this.kvService.get(request.getKey()); + builder.setHeader(getResponseHeader()); + if (value != null) { + builder.setValue(value); + } + response = 
builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getGetMethod(), request, responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Ordinary delete + * + * @param request + * @param responseObserver + */ + @Override + public void delete(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + Kv deleted = this.kvService.delete(key); + if (deleted.getValue() != null) { + WatchKv watchKV = getWatchKv(deleted.getKey(), deleted.getValue()); + subjects.notifyAllObserver(key, WatchType.Delete, new WatchKv[]{watchKV}); + } + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeleteMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Delete by prefix + * + * @param request + * @param responseObserver + */ + @Override + public void deletePrefix(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, + responseObserver); + return; + } + KvResponse response; + KvResponse.Builder builder = KvResponse.newBuilder(); + try { + String key = request.getKey(); + List kvs = this.kvService.deleteWithPrefix(key); + WatchKv[] watchKvs = new WatchKv[kvs.size()]; + int i = 0; + for (Kv kv : kvs) { + WatchKv watchKV = getWatchKv(kv.getKey(), kv.getValue()); + watchKvs[i++] = watchKV; + } + subjects.notifyAllObserver(key, WatchType.Delete, 
watchKvs); + response = builder.setHeader(getResponseHeader()).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getDeletePrefixMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Search by prefix + * + * @param request + * @param responseObserver + */ + @Override + public void scanPrefix(K request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, + responseObserver); + return; + } + ScanPrefixResponse response; + ScanPrefixResponse.Builder builder = ScanPrefixResponse.newBuilder(); + try { + Map kvs = this.kvService.scanWithPrefix(request.getKey()); + response = builder.setHeader(getResponseHeader()).putAllKvs(kvs).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getScanPrefixMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Obtain a random non-0 string as an Id + * + * @return + */ + private long getRandomLong() { + + long result; + Random random = new Random(); + while ((result = random.nextLong()) == 0) { + continue; + } + return result; + } + + /** + * Ordinary watch + * + * @param request + * @param responseObserver + */ + @Override + public void watch(WatchRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + responseObserver.onError(new PDException(-1, msg)); + return; + } + try { + clientWatch(request, responseObserver, false); + } catch (PDException e) { + if (!isLeader()) { + try { + responseObserver.onError(new PDException(-1, msg)); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java 
+======== + return; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java + } catch (IllegalStateException ie) { + + } catch (Exception e1) { + log.error("redirect with error: ", e1); + } + } + } + } + + /** + * Ordinary prefix watch + * + * @param request + * @param responseObserver + */ + @Override + public void watchPrefix(WatchRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + responseObserver.onError(new PDException(-1, msg)); + return; + } + try { + clientWatch(request, responseObserver, true); + } catch (PDException e) { + if (!isLeader()) { + try { + responseObserver.onError(new PDException(-1, msg)); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== + return; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java + } catch (IllegalStateException ie) { + + } catch (Exception e1) { + log.error("redirect with error: ", e1); + } + } + } + } + + /** + * A generic approach to the above two methods + * + * @param request + * @param responseObserver + * @param isPrefix + * @throws PDException + */ + private void clientWatch(WatchRequest request, StreamObserver responseObserver, + boolean isPrefix) throws PDException { + try { + String key = request.getKey(); + long clientId = request.getClientId(); + WatchResponse.Builder builder = WatchResponse.newBuilder(); + WatchResponse response; + if (request.getState().equals(WatchState.Starting) && clientId == 0) { + clientId = getRandomLong(); + response = builder.setClientId(clientId).setState(WatchState.Starting).build(); + } else { + response = builder.setState(WatchState.Started).build(); + } + String delimiter = + isPrefix ? 
KvWatchSubject.PREFIX_DELIMITER : KvWatchSubject.KEY_DELIMITER; + subjects.addObserver(key, clientId, responseObserver, delimiter); + synchronized (responseObserver) { + responseObserver.onNext(response); + } + } catch (PDException e) { + if (!isLeader()) { + throw new PDException(-1, msg); + } + throw new PDException(e.getErrorCode(), e); + } + + } + + /** + * Locking + * + * @param request + * @param responseObserver + */ + @Override + public void lock(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + clientId = getRandomLong(); + } + boolean locked = this.kvService.lock(request.getKey(), request.getTtl(), clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) + .build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockMethod(), request, responseObserver); + return; + } + log.error("lock with error :", e); + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void lockWithoutReentrant(LockRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, + responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + clientId = getRandomLong(); + } + boolean locked = this.kvService.lockWithoutReentrant(request.getKey(), request.getTtl(), + clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(locked).setClientId(clientId) + .build(); + 
} catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getLockWithoutReentrantMethod(), request, + responseObserver); + return; + } + log.error("lock with error :", e); + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void isLocked(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + boolean locked = this.kvService.locked(request.getKey()); + response = builder.setHeader(getResponseHeader()).setSucceed(locked).build(); + } catch (PDException e) { + log.error("lock with error :", e); + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getIsLockedMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Unlock + * + * @param request + * @param responseObserver + */ + @Override + public void unlock(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + throw new PDException(-1, "incorrect clientId: 0"); + } + boolean unlocked = this.kvService.unlock(request.getKey(), clientId); + response = builder.setHeader(getResponseHeader()).setSucceed(unlocked) + .setClientId(clientId).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getUnlockMethod(), request, + responseObserver); + return; + } + response = 
builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Lock renewal + * + * @param request + * @param responseObserver + */ + @Override + public void keepAlive(LockRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, + responseObserver); + return; + } + LockResponse response; + LockResponse.Builder builder = LockResponse.newBuilder(); + try { + long clientId = request.getClientId(); + if (clientId == 0) { + throw new PDException(-1, "incorrect clientId: 0"); + } + boolean alive = this.kvService.keepAlive(request.getKey(), clientId); + response = + builder.setHeader(getResponseHeader()).setSucceed(alive).setClientId(clientId) + .build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepAliveMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * PUT with timeout + * + * @param request + * @param responseObserver + */ + @Override + public void putTTL(TTLRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, responseObserver); + return; + } + TTLResponse response; + TTLResponse.Builder builder = TTLResponse.newBuilder(); + try { + this.kvService.put(request.getKey(), request.getValue(), request.getTtl()); + response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getPutTTLMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Reactivate the key with a 
timeout period + * + * @param request + * @param responseObserver + */ + @Override + public void keepTTLAlive(TTLRequest request, StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, + responseObserver); + return; + } + TTLResponse response; + TTLResponse.Builder builder = TTLResponse.newBuilder(); + try { + this.kvService.keepAlive(request.getKey()); + response = builder.setHeader(getResponseHeader()).setSucceed(true).build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(channel, KvServiceGrpc.getKeepTTLAliveMethod(), request, + responseObserver); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + private WatchKv getWatchKv(String key, String value) { + WatchKv kv = WatchKv.newBuilder().setKey(key).setValue(value).build(); + return kv; + } + + @Override + public void onRaftLeaderChanged() { + subjects.notifyClientChangeLeader(); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/MetaServiceGrpcImpl.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/MetaServiceGrpcImpl.java new file mode 100644 index 0000000000..9c03b8012b --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/MetaServiceGrpcImpl.java @@ -0,0 +1,308 @@ +package org.apache.hugegraph.pd.service; + +import java.util.List; + +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.GraphSpaces; +import org.apache.hugegraph.pd.grpc.Graphs; +import org.apache.hugegraph.pd.grpc.MetaServiceGrpc; +import org.apache.hugegraph.pd.grpc.MetaServiceGrpc.MetaServiceImplBase; +import org.apache.hugegraph.pd.grpc.Metapb.Graph; +import org.apache.hugegraph.pd.grpc.Metapb.GraphSpace; +import 
org.apache.hugegraph.pd.grpc.Metapb.Partition; +import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup; +import org.apache.hugegraph.pd.grpc.Metapb.Store; +import org.apache.hugegraph.pd.grpc.Partitions; +import org.apache.hugegraph.pd.grpc.ShardGroups; +import org.apache.hugegraph.pd.grpc.Stores; +import org.apache.hugegraph.pd.grpc.common.NoArg; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.grpc.common.VoidResponse; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +/** + * 元数据信息获取与修改类 + * + * @author zhangyingjie + * @date 2023/9/19 + **/ +@Slf4j +@GRpcService(interceptors = {GrpcAuthentication.class}) +public class MetaServiceGrpcImpl extends MetaServiceImplBase implements ServiceGrpc { + + @Autowired + private MetadataService metadataService; + private ResponseHeader okHeader = getResponseHeader(); + + /** + * + */ + public void getStores(NoArg request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetStoresMethod(), request, observer); + return; + } + Stores response; + Stores.Builder builder = Stores.newBuilder(); + try { + response = metadataService.getStores(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetStoresMethod(), request, observer); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void getPartitions(NoArg request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetPartitionsMethod(), request, observer); + return; + } + Partitions response; + Partitions.Builder builder = Partitions.newBuilder(); + try { + response = metadataService.getPartitions(); + } catch (PDException e) { + if (!isLeader()) { + 
redirectToLeader(MetaServiceGrpc.getGetPartitionsMethod(), request, observer); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void getShardGroups(NoArg request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetShardGroupsMethod(), request, observer); + return; + } + ShardGroups response; + ShardGroups.Builder builder = ShardGroups.newBuilder(); + try { + response = metadataService.getShardGroups(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetShardGroupsMethod(), request, observer); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + + /** + * + */ + public void getGraphSpaces(NoArg request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetGraphSpacesMethod(), request, observer); + return; + } + GraphSpaces response; + GraphSpaces.Builder builder = GraphSpaces.newBuilder(); + try { + response = metadataService.getGraphSpaces(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetGraphSpacesMethod(), request, observer); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void getGraphs(NoArg request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetGraphsMethod(), request, observer); + return; + } + Graphs response; + Graphs.Builder builder = Graphs.newBuilder(); + try { + response = metadataService.getGraphs(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getGetGraphsMethod(), request, observer); + return; + } + response = builder.setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + 
observer.onCompleted(); + } + + /** + * + */ + public void updateStore(Store request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateStoreMethod(), request, observer); + return; + } + VoidResponse response; + VoidResponse.Builder builder = VoidResponse.newBuilder(); + try { + metadataService.updateStore(request); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateStoreMethod(), request, observer); + return; + } + ResponseHeader header = getResponseHeader(e); + response = builder.setHeader(header).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void updatePartition(Partition request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdatePartitionMethod(), request, observer); + return; + } + VoidResponse response; + VoidResponse.Builder builder = VoidResponse.newBuilder(); + try { + metadataService.updatePartition(request); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdatePartitionMethod(), request, observer); + return; + } + ResponseHeader header = getResponseHeader(e); + response = builder.setHeader(header).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void updateShardGroup(ShardGroup request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateShardGroupMethod(), request, observer); + return; + } + VoidResponse response; + VoidResponse.Builder builder = VoidResponse.newBuilder(); + try { + metadataService.updateShardGroup(request); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateShardGroupMethod(), request, observer); + return; + } + ResponseHeader header = getResponseHeader(e); + response = builder.setHeader(header).build(); 
+ } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void updateGraphSpace(GraphSpace request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateGraphSpaceMethod(), request, observer); + return; + } + VoidResponse response; + VoidResponse.Builder builder = VoidResponse.newBuilder(); + try { + metadataService.updateGraphSpace(request); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateGraphSpaceMethod(), request, observer); + return; + } + ResponseHeader header = getResponseHeader(e); + response = builder.setHeader(header).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + public void updateGraph(Graph request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateGraphMethod(), request, observer); + return; + } + VoidResponse response; + VoidResponse.Builder builder = VoidResponse.newBuilder(); + try { + metadataService.updateGraph(request); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdateGraphMethod(), request, observer); + return; + } + ResponseHeader header = getResponseHeader(e); + response = builder.setHeader(header).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + public void updatePeers(NoArg request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdatePeersMethod(), request, observer); + return; + } + VoidResponse response; + VoidResponse.Builder builder = VoidResponse.newBuilder(); + try { + List addresses = metadataService.getPeerGrpcAddresses(); + PDPulseSubjects.notifyPeerChange(addresses); + builder.setHeader(okHeader); + response = builder.build(); + } catch (PDException e) { + if (!isLeader()) { + redirectToLeader(MetaServiceGrpc.getUpdatePeersMethod(), request, observer); + return; + } 
+ ResponseHeader header = getResponseHeader(e); + response = builder.setHeader(header).build(); + } + observer.onNext(response); + observer.onCompleted(); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java new file mode 100644 index 0000000000..fcf6976414 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDPulseService.java @@ -0,0 +1,40 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc; +import org.apache.hugegraph.pd.grpc.pulse.PulseRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; +import org.apache.hugegraph.pd.pulse.PulseDurableProvider; +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import javax.annotation.PostConstruct; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/4 + */ + +@Slf4j +@GRpcService(interceptors = {GrpcAuthentication.class}) +@Component("pdPulseService") +public class PDPulseService extends HgPdPulseGrpc.HgPdPulseImplBase { + + @Autowired + private PulseDurableProvider durableQueueProvider; + + @PostConstruct + public void init() { + PDPulseSubjects.setDurableQueueProvider(this.durableQueueProvider); + } + + @Override + public StreamObserver pulse(StreamObserver responseObserver) { + return PDPulseSubjects.addObserver(responseObserver); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java new file mode 100644 index 0000000000..a117a3d351 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -0,0 +1,366 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.service; +======== +package org.apache.hugegraph.pd.service; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import javax.annotation.PreDestroy; + +import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import com.alipay.sofa.jraft.Node; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.PeerId; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.boot.ShutdownHook; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; 
+ +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +@Service +public class PDRestService implements InitializingBean { + + private static final String EMPTY_STRING = ""; + @Autowired + PDService pdService; + @Autowired + DiscoveryService discoveryService; + private StoreNodeService storeNodeService; + private PartitionService partitionService; + private TaskScheduleService monitorService; + private ConfigService configService; + private LogService logService; + private StoreMonitorDataService storeMonitorDataService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +======== + private static final int WAIT_TIMEOUT = 45; + @Autowired + PDService pdService; + @Autowired + DiscoveryService discoveryService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java + + /** + * 
initialize + * + * @throws Exception + */ + @Override + public void afterPropertiesSet() throws Exception { + storeNodeService = pdService.getStoreNodeService(); + partitionService = pdService.getPartitionService(); + monitorService = pdService.getTaskService(); + configService = pdService.getConfigService(); + logService = pdService.getLogService(); + storeMonitorDataService = pdService.getStoreMonitorDataService(); + HgAssert.isNotNull(storeNodeService, "storeNodeService does not initialize"); + HgAssert.isNotNull(partitionService, "partitionService does not initialize"); + } + + public List getStores(String graphName) throws PDException { + return storeNodeService.getStores(graphName); + } + + public Metapb.Store getStore(long storeId) throws PDException { + return storeNodeService.getStore(storeId); + } + + public List getShardGroups() throws PDException { + return storeNodeService.getShardGroups(); + } + + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + logService.insertLog(LogService.NODE_CHANGE, LogService.REST, store); + return storeNodeService.updateStore(store); + } + + public boolean removeStore(Long storeId) throws PDException { + if (storeId == null) { + return false; + } + return 0 != storeNodeService.removeStore(storeId); + } + + public Metapb.GraphSpace setGraphSpace(Metapb.GraphSpace graphSpace) throws PDException { + return configService.setGraphSpace(graphSpace); + } + + public List getGraphSpaces() throws PDException { + return configService.getGraphSpace(EMPTY_STRING); + } + + public Metapb.GraphSpace getGraphSpace(String graphSpaceName) throws PDException { + return configService.getGraphSpace(graphSpaceName).get(0); + } + + public List getGraphs() throws PDException { + return partitionService.getGraphs(); + } + + public Metapb.Graph getGraph(String graphName) throws PDException { + return partitionService.getGraph(graphName); + } + + public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { + return 
partitionService.updateGraphName(graph); + } + + public List getPartitions(String graphName) { + return partitionService.getPartitions(graphName); + } + + public Map getShardGroupCache() { + return partitionService.getShardGroupCache(); + } + + public List patrolStores() throws PDException { + return monitorService.patrolStores(); + } + + public List patrolPartitions() throws PDException { + return monitorService.patrolPartitions(); + } + + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws + PDException { + return partitionService.getPartitionStats(graphName, partitionId); + } + + public List getPartitionStatus(String graphName) throws PDException { + return partitionService.getPartitionStatus(graphName); + } + + public Map> balancePartitions() throws PDException { + return monitorService.balancePartitionShard(DEFAULT_STORE_GROUP_ID); + } + + public List splitPartitions() throws PDException { + return monitorService.autoSplitPartition(DEFAULT_STORE_GROUP_ID); + } + + public List getStoreStats(boolean isActive) throws PDException { + return storeNodeService.getStoreStatus(isActive); + } + + public List> getMonitorData(long storeId) throws PDException { + return storeMonitorDataService.getStoreMonitorData(storeId); + } + + public String getMonitorDataText(long storeId) throws PDException { + return storeMonitorDataService.getStoreMonitorDataText(storeId); + } + + public RegistryRestResponse register(NodeInfo nodeInfo) throws PDException { + CountDownLatch latch = new CountDownLatch(1); + final RegisterInfo[] info = {null}; + RegistryRestResponse response = new RegistryRestResponse(); + try { + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(RegisterInfo value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + this.discoveryService.register(nodeInfo, 
observer); + latch.await(WAIT_TIMEOUT, TimeUnit.SECONDS); + Errors error = info[0].getHeader().getError(); + response.setErrorType(error.getType()); + response.setMessage(error.getMessage()); + } catch (Exception e) { + response.setErrorType(ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return response; + } + + public ArrayList getNodeInfo(Query request) throws PDException { + CountDownLatch latch = new CountDownLatch(1); + final NodeInfos[] info = {null}; + RegistryRestResponse response = new RegistryRestResponse(); + ArrayList registryRestRequests = null; + try { + StreamObserver observer = new StreamObserver() { + @Override + public void onNext(NodeInfos value) { + info[0] = value; + latch.countDown(); + } + + @Override + public void onError(Throwable t) { + latch.countDown(); + } + + @Override + public void onCompleted() { + latch.countDown(); + } + }; + this.discoveryService.getNodes(request, observer); + latch.await(WAIT_TIMEOUT, TimeUnit.SECONDS); + List infoList = info[0].getInfoList(); + registryRestRequests = new ArrayList(infoList.size()); + for (int i = 0; i < infoList.size(); i++) { + NodeInfo element = infoList.get(i); + RegistryRestRequest registryRestRequest = new RegistryRestRequest(); + registryRestRequest.setAddress(element.getAddress()); + registryRestRequest.setAppName(element.getAppName()); + registryRestRequest.setVersion(element.getVersion()); + registryRestRequest.setInterval(String.valueOf(element.getInterval())); + HashMap labels = new HashMap<>(); + labels.putAll(element.getLabelsMap()); + registryRestRequest.setLabels(labels); + registryRestRequests.add(registryRestRequest); + } + } catch (Exception e) { + response.setErrorType(ErrorType.UNRECOGNIZED); + response.setMessage(e.getMessage()); + } + return registryRestRequests; + } + + public List getStoreStatusLog(Long start, Long end) throws PDException { + return logService.getLog(LogService.NODE_CHANGE, start, end); + } + + public List getPartitionLog(Long 
start, Long end) throws PDException { + return logService.getLog(LogService.PARTITION_CHANGE, start, end); + } + + public Map balancePartitionLeader() throws PDException { + return monitorService.balancePartitionLeader(true); + } + + public void dbCompaction() throws PDException { + monitorService.dbCompaction(""); + } + + public List getShardList(int partitionId) throws PDException { + return storeNodeService.getShardList(partitionId); + } + + public void resetPartitionState(Metapb.Partition partition) throws PDException { + partitionService.updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); + } + + public int getStoreGroupId(long storeId) throws PDException { + return storeNodeService.getStoreGroupByStore(storeId); + } + + @PreDestroy + public void shutdown(){ + try { + log.info("shutdown RaftEngine...."); + RaftEngine engine = RaftEngine.getInstance(); + int count = 0; + while (count++ < 3) { + Node raftNode = engine.getRaftNode(); + if (raftNode.isLeader(true)) { + Status status = raftNode.transferLeadershipTo(PeerId.ANY_PEER); + if (status.isOk()) { + raftNode.disableVote(); + break; + } else { + log.warn("transfer leader with warning: {}", status); + synchronized (ShutdownHook.class) { + ShutdownHook.class.wait(1000); + } + } + } else { + break; + } + } + engine.shutDown(); + log.info("RaftEngine shutdown and start to shutdown db...."); + MetadataFactory.closeStore(); + log.info("db shutdown"); + log.info("all resources have been closed"); + } catch (Exception e) { + log.warn("shutdown with error:", e); + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java new file mode 100644 index 0000000000..a73dcd0ed6 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -0,0 +1,2823 @@ +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.service; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +import javax.annotation.PostConstruct; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.PartitionStatusListener; +import org.apache.hugegraph.pd.ShardGroupStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import 
org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.grpc.watch.NodeEventType; +import org.apache.hugegraph.pd.grpc.watch.WatchGraphResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchResponse; +import org.apache.hugegraph.pd.grpc.watch.WatchType; +import org.apache.hugegraph.pd.pulse.PDPulseSubject; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; +import org.apache.hugegraph.pd.watch.PDWatchSubject; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.util.CollectionUtils; + +import com.alipay.sofa.jraft.JRaftUtils; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import 
com.alipay.sofa.jraft.entity.PeerId; +======== +package org.apache.hugegraph.pd.service; + +import static org.apache.hugegraph.pd.grpc.common.ErrorType.GRAPH_NOT_EXISTS; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.PARTITION_NOT_EXISTS; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.STORE_GROUP_NOT_EXISTS; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.TASK_NOT_EXISTS; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import javax.annotation.PostConstruct; + +import org.apache.commons.io.FileUtils; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.DependsOn; +import org.springframework.util.CollectionUtils; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.GraphStats; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; +import 
org.apache.hugegraph.pd.grpc.Pdpb.GetLeaderGrpcAddressResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GraphStatsResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; +import org.apache.hugegraph.pd.grpc.StoreGroup; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.NoArg; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.grpc.common.VoidResponse; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; +import org.apache.hugegraph.pd.pulse.impl.PartitionInstructionListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.PartitionStatusListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.PulseListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.ShardGroupStatusListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.StoreStatusListenerImpl; +import org.apache.hugegraph.pd.raft.PeerUtil; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; +import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; + +import io.grpc.stub.StreamObserver; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + +import io.grpc.ManagedChannel; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +// TODO: uncomment later - remove license verifier service now +@Slf4j +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +@GRpcService +public class 
PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftStateListener { + + static String TASK_ID_KEY = "task_id"; + private final Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build(); + // private ManagedChannel channel; + private final Map channelMap = new ConcurrentHashMap<>(); + @Autowired + private PDConfig pdConfig; +======== +@GRpcService(interceptors = {GrpcAuthentication.class}) +@DependsOn("pdPulseService") +public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc { + + public static final String TASK_ID_KEY = "task_id"; + private static final String USER_TASK_ID_KEY = "user_task_key"; + @Autowired + private PDConfig pdConfig; + @Getter +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + private StoreNodeService storeNodeService; + @Getter + private PartitionService partitionService; + @Getter + private TaskScheduleService taskService; + @Getter + private IdService idService; + @Getter + private ConfigService configService; + @Getter + private LogService logService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + //private LicenseVerifierService licenseVerifierService; + private StoreMonitorDataService storeMonitorDataService; + private ManagedChannel channel; + + private Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build(); + return header; + } + + private Pdpb.ResponseHeader newErrorHeader(PDException e) { + Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( + Pdpb.Error.newBuilder().setTypeValue(e.getErrorCode()).setMessage(e.getMessage())) + .build(); + return header; + } + + public StoreNodeService getStoreNodeService() { + return 
storeNodeService; + } + + public PartitionService getPartitionService() { + return partitionService; + } + + public TaskScheduleService getTaskService() { + return taskService; + } + + public ConfigService getConfigService() { + return configService; + } + + public StoreMonitorDataService getStoreMonitorDataService() { + return this.storeMonitorDataService; + } + + public LogService getLogService() { + return logService; + } + + //public LicenseVerifierService getLicenseVerifierService() { + // return licenseVerifierService; + //} +======== + @Getter + private LicenseVerifierService licenseVerifierService; + @Getter + private StoreMonitorDataService storeMonitorDataService; + private ResponseHeader okHeader = getResponseHeader(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + + /** + * initialize + */ + @PostConstruct + public void init() throws PDException { + log.info("PDService init……{}", pdConfig); + configService = new ConfigService(pdConfig); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + + RaftEngine.getInstance().addStateListener(this); + RaftEngine.getInstance().addStateListener(configService); + RaftEngine.getInstance().init(pdConfig.getRaft()); + //pdConfig = configService.loadConfig(); onLeaderChanged +======== + RaftEngine engine = RaftEngine.getInstance(); + engine.addStateListener(this); + engine.addStateListener(configService); + engine.init(pdConfig.getRaft()); + // pdConfig = configService.loadConfig(); onLeaderChanged中加载 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + storeNodeService = new StoreNodeService(pdConfig); + partitionService = new PartitionService(pdConfig, storeNodeService, configService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService, configService); + idService = new IdService(pdConfig); + logService 
= new LogService(pdConfig); + storeMonitorDataService = new StoreMonitorDataService(pdConfig); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + //if (licenseVerifierService == null) { + // licenseVerifierService = new LicenseVerifierService(pdConfig); + //} + RaftEngine.getInstance().addStateListener(partitionService); + pdConfig.setIdService(idService); + + // Receive a heartbeat message + PDPulseSubject.listenPartitionHeartbeat(new PulseListener() { + @Override + public void onNext(PartitionHeartbeatRequest request) throws Exception { + partitionService.partitionHeartbeat(request.getStates()); + } + + @Override + public void onError(Throwable throwable) { + log.error("Received an error notice from pd-client", throwable); + } + + @Override + public void onCompleted() { + log.info("Received an completed notice from pd-client"); + } + }); + + /** + // Listen for partition commands and forward them to Store + */ + partitionService.addInstructionListener(new PartitionInstructionListener() { + private PartitionHeartbeatResponse.Builder getBuilder(Metapb.Partition partition) throws + PDException { + return PartitionHeartbeatResponse.newBuilder().setPartition(partition) + .setId(idService.getId(TASK_ID_KEY, 1)); + } + + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setChangeShard(changeShard)); + + } + + @Override + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws + PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setTransferLeader(transferLeader)); + } + + @Override + public void splitPartition(Metapb.Partition partition, + SplitPartition splitPartition) throws + PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setSplitPartition(splitPartition)); + + } + + @Override + public void dbCompaction(Metapb.Partition 
partition, DbCompaction dbCompaction) throws + PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setDbCompaction(dbCompaction)); + + } + + @Override + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setMovePartition(movePartition)); + } + + @Override + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { + PDPulseSubject.notifyClient( + getBuilder(partition).setCleanPartition(cleanPartition)); + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) + throws PDException { + PDPulseSubject.notifyClient(getBuilder(partition).setKeyRange(partitionKeyRange)); + } + }); + + /** + // Listen for partition status change messages and forward them to Client + */ + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.ALTER, + partition.getGraphName(), partition.getId()); + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + PDWatchSubject.notifyPartitionChange(PDWatchSubject.ChangeType.DEL, + partition.getGraphName(), + partition.getId()); + + } + }); + + storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListener() { + @Override + public void onShardListChanged(Metapb.ShardGroup shardGroup, + Metapb.ShardGroup newShardGroup) { + // invoked before change, saved to db and update cache. 
+ if (newShardGroup == null) { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.DEL, + shardGroup.getId(), + shardGroup); + } else { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.ALTER, + shardGroup.getId(), newShardGroup); + } + } + + @Override + public void onShardListOp(Metapb.ShardGroup shardGroup) { + PDWatchSubject.notifyShardGroupChange(PDWatchSubject.ChangeType.USER_DEFINED, + shardGroup.getId(), shardGroup); + } + }); + + /** + // Listen for store status change messages and forward them to Client + */ + storeNodeService.addStatusListener(new StoreStatusListener() { + + @Override + public void onStoreStatusChanged(Metapb.Store store, + Metapb.StoreState old, + Metapb.StoreState status) { + NodeEventType type = NodeEventType.NODE_EVENT_TYPE_UNKNOWN; + if (status == Metapb.StoreState.Up) { + type = NodeEventType.NODE_EVENT_TYPE_NODE_ONLINE; + } else if (status == Metapb.StoreState.Offline) { + type = NodeEventType.NODE_EVENT_TYPE_NODE_OFFLINE; + } + PDWatchSubject.notifyNodeChange(type, "", store.getId()); + } + + @Override + public void onGraphChange(Metapb.Graph graph, + Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + WatchGraphResponse wgr = WatchGraphResponse.newBuilder() + .setGraph(graph) + .build(); + WatchResponse.Builder wr = WatchResponse.newBuilder() + .setGraphResponse(wgr); + PDWatchSubject.notifyChange(WatchType.WATCH_TYPE_GRAPH_CHANGE, + wr); + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_NODE_RAFT_CHANGE, "", + store.getId()); + } + }); +======== + if (licenseVerifierService == null) { + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + engine.addStateListener(partitionService); + pdConfig.setIdService(idService); + // 接收心跳消息 + PDPulseSubjects.listenPartitionHeartbeat(new PulseListenerImpl(this)); + // 处理心跳Listener异常,返回0,不中断其他Listener。 +// PDPulseSubjects.setPartitionErrInterceptor( +// 
(req,e) -> { +// if (e instanceof PDException) { +// var pde = (PDException) e; +// if (pde.getErrorCode() == NOT_LEADER.getNumber()) { +// try { +// log.info("send change leader command to watch, due to ERROR-100", pde); +// PDPulseSubjects.notifyClient(PdInstructionResponse.newBuilder() +// .setInstructionType(PdInstructionType.CHANGE_TO_FOLLOWER) +// .setLeaderIp(engine.getLeaderGrpcAddress()) +// .build()); +// } catch (Exception ex) { +// log.error("send notice to observer failed, ", ex); +// } +// return 1; // Aborting other listeners. +// } +// } else { +// log.error("handleNotice error", e); +// } +// return 0; +// } +// ); + // 监听分区指令,并转发给Store + partitionService.addInstructionListener(new PartitionInstructionListenerImpl(this)); + // 监听分区状态改变消息,并转发给Client + partitionService.addStatusListener(new PartitionStatusListenerImpl()); + storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListenerImpl()); + // 监听store状态改变消息,并转发给Client + storeNodeService.addStatusListener(new StoreStatusListenerImpl()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + storeNodeService.init(partitionService); + partitionService.init(); + taskService.init(); + } + + /** + *
+     * Register a store, and the first registration generates a new store_id, store_id is the unique identifier of the store
+     * 
+ */ + @Override + public void registerStore(Pdpb.RegisterStoreRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getRegisterStoreMethod(), request, observer); + return; + } + Pdpb.RegisterStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.register(request.getStore()); + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(okHeader) + .setStoreId(store.getId()) + .build(); + } catch (PDException e) { + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("registerStore exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + + } + + /** + * Find the store based on store_id + */ + @Override + public void getStore(Pdpb.GetStoreRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreMethod(), request, observer); + return; + } + Pdpb.GetStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.getStore(request.getStoreId()); + response = + Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + } catch (PDException e) { + response = Pdpb.GetStoreResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("{} getStore exception: {}", StreamObserverUtil.getRemoteIP(observer), e); + } + + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Modify information such as the status of the store.
+     * 
+ */ + @Override + public void setStore(Pdpb.SetStoreRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetStoreMethod(), request, observer); + return; + } + Pdpb.SetStoreResponse response = null; + try { + Metapb.StoreState state = request.getStore().getState(); + Long storeId = request.getStore().getId(); + // In the Pending state, you can go online + Metapb.Store lastStore = storeNodeService.getStore(request.getStore().getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (lastStore == null) { + // storeId does not exist, an exception is thrown + throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist!", storeId)); + } + if (Metapb.StoreState.Up.equals(state)) { + if (!Metapb.StoreState.Pending.equals(lastStore.getState())) { + throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "only stores in Pending state can be set to Up!"); +======== + if (lastStore == null){ + // storeId不存在,抛出异常 + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist!", storeId)); + } + if (Metapb.StoreState.Up.equals(state)){ + if (!Metapb.StoreState.Pending.equals(lastStore.getState())){ + throw new PDException(ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "only stores in Pending state can be set to Up!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + } + if (state.equals(Metapb.StoreState.Offline)) { + Metapb.ClusterStats stats = storeNodeService.getClusterStats(storeId); + if (stats.getState() != Metapb.ClusterState.Cluster_OK) { + ResponseHeader errorHeader = getResponseHeader(-1, + "can not offline node " + + + "when cluster state is not " + + "normal "); + response = Pdpb.SetStoreResponse.newBuilder().setHeader(errorHeader).build(); + observer.onNext(response); + observer.onCompleted(); + 
return; + } + } + logService.insertLog(LogService.NODE_CHANGE, LogService.GRPC, request.getStore()); + // If the check fails, the status will be changed to Pending, and the reason for the + // error will be returned + if (state.equals(Metapb.StoreState.Up)) { + int cores = 0; + long id = request.getStore().getId(); + List stores = storeNodeService.getStores(); + int nodeCount = 0; + for (Metapb.Store store : stores) { + if (store.getId() == id) { + // Get the cores from the previously registered store as a validation + // parameter + cores = store.getCores(); + } + if (store.getState().equals(Metapb.StoreState.Up)) { + nodeCount++; + } + } + try { + //licenseVerifierService.verify(cores, nodeCount); + } catch (Exception e) { + Metapb.Store store = Metapb.Store.newBuilder(request.getStore()) + .setState(Metapb.StoreState.Pending).build(); + storeNodeService.updateStore(store); + throw new PDException(ErrorType.LICENSE_ERROR_VALUE, + "check license with error :" + + e.getMessage() + + ", and changed node state to 'Pending'"); + } + } + Metapb.Store store = request.getStore(); + // Before going offline, check whether the number of active machines is greater than + // the minimum threshold + if (state.equals(Metapb.StoreState.Tombstone)) { + List activeStores = storeNodeService.getActiveStores(); + if (lastStore.getState() == Metapb.StoreState.Up +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + + pdConfig.getMinStoreCount()); + } + if (!storeNodeService.checkStoreCanOffline(request.getStore())) { + throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, + "check activeStores or online shardsList size"); + } + if (lastStore.getState() == Metapb.StoreState.Exiting) { + // If it is already in the offline state, no further processing will be 
made + throw new PDException(Pdpb.ErrorType.Store_Tombstone_Doing_VALUE, + "Downline is in progress, do not resubmit"); +======== + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); + } + if (!storeNodeService.checkStoreCanOffline(request.getStore())){ + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "check activeStores or online shardsList size"); + } + if (lastStore.getState() == Metapb.StoreState.Exiting){ + // 如果已经是下线中的状态,则不作进一步处理 + throw new PDException(ErrorType.Store_Tombstone_Doing_VALUE, + "Downline is in progress, do not resubmit"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + Map resultMap = taskService.canAllPartitionsMovedOut(lastStore); + if ((boolean) resultMap.get("flag")) { + if (resultMap.get("current_store_is_online") != null +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + && (boolean) resultMap.get("current_store_is_online")) { +======== + && (boolean) resultMap.get("current_store_is_online")) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.info("updateStore removeActiveStores store {}", store.getId()); + // Set the status of the online store to Offline and wait for the replica + // to be migrated + store = Metapb.Store.newBuilder(lastStore) + .setState(Metapb.StoreState.Exiting).build(); + // Perform partition migration operations + taskService.movePartitions((Map>) resultMap.get( + "movedPartitions")); + } else { + // If the store is offline, the replica is not migrated + // Change the status to Tombstone + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } else { + throw new 
PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "the resources on other stores may be not enough to " + + "store " + + "the partitions of current store!"); +======== + }else{ + throw new PDException(ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "the resources on other stores may be not enough to store " + + "the partitions of current store!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + } + store = storeNodeService.updateStore(store); + response = + Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); + } catch (PDException e) { + response = Pdpb.SetStoreResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("setStore exception: ", e); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getAllStores(Pdpb.GetAllStoresRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllStoresMethod(), request, observer); + return; + } + Pdpb.GetAllStoresResponse response = null; + try { + List stores = null; + if (request.getExcludeOfflineStores()) { + if (! 
request.getGraphName().isEmpty()) { + var graph = partitionService.getGraph(request.getGraphName()); + stores = storeNodeService.getActiveStoresByStoreGroup(graph.getStoreGroupId()); + } else { + stores = storeNodeService.getActiveStores(); + } + } else { + stores = storeNodeService.getStores(request.getGraphName()); + } + response = + Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); + } catch (PDException e) { + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getAllStores exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Handle store heartbeats + */ + @Override + public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getStoreHeartbeatMethod(), request, observer); + return; + } + + Metapb.StoreStats stats = request.getStats(); + + // save monitor data when monitor data enabled + if (this.pdConfig.getStore().isMonitorDataEnabled()) { + try { + storeMonitorDataService.saveMonitorData(stats); + } catch (PDException e) { + log.error("save status failed, state:{}", stats); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + // remove system_metrics + stats = Metapb.StoreStats.newBuilder() + .mergeFrom(request.getStats()) + .clearField(Metapb.StoreStats.getDescriptor().findFieldByName( + "system_metrics")) + .build(); +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + + // remove system_metrics + stats = Metapb.StoreStats.newBuilder() + .mergeFrom(request.getStats()) + .clearSystemMetrics() + .build(); + + Pdpb.StoreHeartbeatResponse response; + try { + Metapb.ClusterStats clusterStats = storeNodeService.heartBeat(stats); + Pdpb.StoreHeartbeatResponse.Builder builder = + 
Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader); + if (clusterStats != null) { + builder.setClusterStats(clusterStats); + } + response = builder.build(); + } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = + Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("storeHeartbeat exception: ", e); + } catch (Exception e2) { + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader( + getResponseHeader(ErrorType.UNKNOWN_VALUE, e2.getMessage())).build(); + log.error("storeHeartbeat exception: ", e2); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Find the partition to which the key belongs
+     * 
+ */ + @Override + public void getPartition(Pdpb.GetPartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = + partitionService.getPartitionShard(request.getGraphName(), + request.getKey() + .toByteArray()); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getPartition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Find the partition to which the HashCode belongs
+     * 
+ */ + @Override + public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionByCodeMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = + partitionService.getPartitionByCode(request.getGraphName(), + request.getCode()); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + .setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getPartitionByCode exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Find partition based on partition_id + */ + @Override + public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionByIDMethod(), request, observer); + return; + } + Pdpb.GetPartitionResponse response = null; + try { + Metapb.PartitionShard partShard = + partitionService.getPartitionShardById(request.getGraphName(), + request.getPartitionId()); + if (partShard == null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, + String.format("partition: %s-%s not found", + request.getGraphName(), + request.getPartitionId())); +======== + throw new PDException(ErrorType.NOT_FOUND_VALUE, + String.format("partition: %s-%s not found", request.getGraphName(), request.getPartitionId())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partShard.getPartition()) + 
.setLeader(partShard.getLeader()).build(); + } catch (PDException e) { + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getPartitionByID exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Update partition information, mainly used to update the partition key range, call this API with caution, otherwise it will cause data loss.
+     * 
+ */ + @Override + public void updatePartition(Pdpb.UpdatePartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdatePartitionMethod(), request, observer); + return; + } + Pdpb.UpdatePartitionResponse response = null; + try { + partitionService.updatePartition(request.getPartitionList()); + response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(okHeader).build(); + + } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = + Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("update partition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Find partition based on partition_id + */ + @Override + public void delPartition(Pdpb.DelPartitionRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelPartitionMethod(), request, observer); + return; + } + Pdpb.DelPartitionResponse response = null; + try { + Metapb.Partition partition = partitionService.getPartitionById(request.getGraphName(), + request.getPartitionId()); + if (partition != null) { + partitionService.removePartition(request.getGraphName(), + request.getPartitionId()); + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader) + .setPartition(partition) + .build(); + } else { + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).build(); + } + } catch (PDException e) { + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("delPartition exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + 
/** + * The set of partitions to which a given key range looks + */ + @Override + public void scanPartitions(Pdpb.ScanPartitionsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getScanPartitionsMethod(), request, observer); + return; + } + Pdpb.ScanPartitionsResponse response = null; + try { + List partShards = + partitionService.scanPartitions(request.getGraphName(), + request.getStartKey() + .toByteArray(), + request.getEndKey() + .toByteArray()); + response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(okHeader) + .addAllPartitions(partShards).build(); + } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = + Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("scanPartitions exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Get graph information + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + public void getGraph(GetGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphMethod(), request, observer); + return; + } + + Pdpb.GetGraphResponse response = null; + String graphName = request.getGraphName(); + try { + Metapb.Graph graph = partitionService.getGraph(graphName); + if (graph != null) { + response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) + .build(); + } else { + ResponseHeader header = 
ResponseHeader.newBuilder().setError( + Errors.newBuilder().setType(ErrorType.NOT_FOUND).build()).build(); + response = Pdpb.GetGraphResponse.newBuilder().setHeader(header).build(); + } + } catch (PDException e) { + response = Pdpb.GetGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getGraph exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Modify the diagram information + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override + public void setGraph(Pdpb.SetGraphRequest request, + io.grpc.stub.StreamObserver observer) { +======== + public void setGraph(Pdpb.CreateGraphRequest request, + io.grpc.stub.StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetGraphMethod(), request, observer); + return; + } + Pdpb.CreateGraphResponse response; + Metapb.Graph graph = request.getGraph(); + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + graph = partitionService.updateGraph(graph); + response = + Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); +======== + var lastGraph = partitionService.getGraph(graph.getGraphName()); + if (lastGraph != null) { + graph = partitionService.updateGraphName(graph); + } else { + graph = partitionService.createGraph(graph.getGraphName(), + graph.getPartitionCount(), graph.getStoreGroupId()); + } + + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } catch (PDException e) { + log.error("setGraph exception: ", e); + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + 
observer.onNext(response); + observer.onCompleted(); + } + + /** + * Get graph information + */ + @Override + public void delGraph(Pdpb.DelGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelGraphMethod(), request, observer); + return; + } + + Pdpb.DelGraphResponse response = null; + String graphName = request.getGraphName(); + try { + Metapb.Graph graph = partitionService.delGraph(graphName); + if (graph != null) { + response = Pdpb.DelGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) + .build(); + } + } catch (PDException e) { + response = Pdpb.DelGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getGraph exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Query partition information based on conditions, such as Store and Graph
+     * 
+ */ + @Override + public void queryPartitions(Pdpb.QueryPartitionsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getQueryPartitionsMethod(), request, observer); + return; + } + // The traversal scheme is used temporarily, and when the rocksdb storage is used in + // the future, it is implemented through KV indexes + Metapb.PartitionQuery query = request.getQuery(); + List partitions = partitionService.getPartitions(query.getGraphName()); + List result = new ArrayList<>(); + if (!CollectionUtils.isEmpty(partitions)) { + for (Metapb.Partition partition : partitions) { + if (query.hasPartitionId() && partition.getId() != query.getPartitionId()) { + continue; + } + if (query.hasGraphName() && + !partition.getGraphName().equals(query.getGraphName())) { + continue; + } + long storeId = query.getStoreId(); + if (query.hasStoreId() && query.getStoreId() != 0) { + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + storeNodeService.getShardGroup(partition.getId()).getShardsList() + .forEach(shard -> { + if (shard.getStoreId() == storeId) { + result.add(partition); + } + }); + } catch (PDException e) { + log.error("query partitions error, req:{}, error:{}", request, + e.getMessage()); +======== + var shardGroup = storeNodeService.getShardGroup(partition.getId()); + // 清理的时候,可能导致shard group被删除 + if (shardGroup != null) { + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() == storeId) { + result.add(partition); + } + }); + } + }catch (PDException e){ + log.error("query partitions error, req:{}, error:{}", request, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + } else { + result.add(partition); + } + } + } + Pdpb.QueryPartitionsResponse response = Pdpb.QueryPartitionsResponse.newBuilder() + .addAllPartitions( + result).build(); + 
observer.onNext(response); + observer.onCompleted(); + + } + + @Override + public void getId(Pdpb.GetIdRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetIdMethod(), request, responseObserver); + return; + } + long id = 0L; + try { + id = idService.getId(request.getKey(), request.getDelta()); + } catch (PDException e) { + responseObserver.onError(e); + log.error("getId exception: ", e); + return; + } + Pdpb.GetIdResponse response = + Pdpb.GetIdResponse.newBuilder().setId(id).setDelta(request.getDelta()) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void resetId(Pdpb.ResetIdRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getResetIdMethod(), request, responseObserver); + return; + } + try { + idService.resetId(request.getKey()); + } catch (PDException e) { + responseObserver.onError(e); + log.error("getId exception: ", e); + return; + } + Pdpb.ResetIdResponse response = Pdpb.ResetIdResponse.newBuilder().setResult(0).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + /** + * Obtain cluster member information + */ + @Override + public void getMembers(Pdpb.GetMembersRequest request, + io.grpc.stub.StreamObserver observer) { + + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetMembersMethod(), request, observer); + return; + } + Pdpb.GetMembersResponse response; + try { + response = Pdpb.GetMembersResponse.newBuilder() + .addAllMembers(RaftEngine.getInstance().getMembers()) + .setLeader(RaftEngine.getInstance().getLocalMember()) + .build(); + + } catch (Exception e) { + log.error("getMembers exception: ", e); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = Pdpb.GetMembersResponse.newBuilder() + .setHeader(newErrorHeader(-1, e.getMessage())) +======== + response = 
Pdpb.GetMembersResponse.newBuilder().setHeader(getResponseHeader(-1, e.getMessage())) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + .build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getStoreStatus(Pdpb.GetAllStoresRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreStatusMethod(), request, observer); + return; + } + Pdpb.GetAllStoresResponse response = null; + try { + List stores = null; + stores = storeNodeService.getStoreStatus(request.getExcludeOfflineStores()); + response = + Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) + .build(); + } catch (PDException e) { + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + log.error("getAllStores exception: ", e); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Read the PD configuration + */ + @Override + public void getPDConfig(Pdpb.GetPDConfigRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPDConfigMethod(), request, observer); + return; + } + Pdpb.GetPDConfigResponse response = null; + try { + Metapb.PDConfig pdConfig = null; + pdConfig = configService.getPDConfig(request.getVersion()); + response = + Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig) + .build(); + } catch (PDException e) { + response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Modify the PD configuration + */ + @Override + public void setPDConfig(Pdpb.SetPDConfigRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetPDConfigMethod(), request, observer); + return; + } + Pdpb.SetPDConfigResponse response = null; 
+ try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (request.getPdConfig().getShardCount() % 2 != 1) { + // Parity of the number of replicas + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count must be an odd number!"); + } + if (request.getPdConfig().getShardCount() > + storeNodeService.getActiveStores().size()) { + // It can't be greater than the number of active stores + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count can't be greater than the number of active " + + "stores!"); + } + int oldShardCount = configService.getPDConfig().getShardCount(); + int newShardCount = request.getPdConfig().getShardCount(); + if (newShardCount > oldShardCount) { + // If the number of replicas increases, check whether the resources inside the + // store are sufficient + if (!isResourceEnough(oldShardCount, newShardCount)) { + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "There is not enough disk space left!"); + } + + if (!checkShardCount(newShardCount)) { + throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "the cluster can't support so many shard count!"); +======== + if (request.getPdConfig().getShardCount() % 2 != 1){ + // 副本数奇偶校验 + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count must be an odd number!"); + } + if (request.getPdConfig().getShardCount() > + storeNodeService.getActiveStores().size()){ + // 不能大于活跃的store数量 + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count can't be greater than the number of active stores!"); + } + int oldShardCount = configService.getPDConfig().getShardCount(); + int newShardCount = request.getPdConfig().getShardCount(); + if (newShardCount > oldShardCount){ + // 如果副本数增大,则检查store内部的资源是否够用 + if (! 
isResourceEnough(oldShardCount, newShardCount)) { + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "There is not enough disk space left!"); + } + + if (! checkShardCount(newShardCount)) { + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "the cluster can't support so many shard count!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + } + configService.setPDConfig(request.getPdConfig()); + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Read the graph space configuration + */ + @Override + public void getGraphSpace(Pdpb.GetGraphSpaceRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphSpaceMethod(), request, observer); + return; + } + Pdpb.GetGraphSpaceResponse response = null; + try { + List graphSpaces = null; + graphSpaces = configService.getGraphSpace(request.getGraphSpaceName()); + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(okHeader) + .addAllGraphSpace(graphSpaces).build(); + } catch (PDException e) { + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Modify the graph space configuration + */ + @Override + public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSetGraphSpaceMethod(), request, observer); + return; + } + Pdpb.SetGraphSpaceResponse response = null; + try { + configService.setGraphSpace(request.getGraphSpace()); + response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) 
{ + response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Data fragmentation
+     * 
+ */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override + public void splitData(Pdpb.SplitDataRequest request, + StreamObserver observer) { +======== + public void splitData(ClusterOp.SplitDataRequest request, StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getSplitDataMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "splitData", request); + ClusterOp.SplitDataResponse response = null; + try { + taskService.splitPartition(request.getMode(), request.getStoreGroupId(), request.getParamList()); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("splitData exception:", e); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + + } + + @Override +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + public void splitGraphData(Pdpb.SplitGraphDataRequest request, + StreamObserver observer) { +======== + public void splitGraphData(ClusterOp.SplitGraphDataRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getSplitGraphDataMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "splitGraphData", request); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + Pdpb.SplitDataResponse response; + try { + partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), + request.getToCount()); + response = 
Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); +======== + ClusterOp.SplitDataResponse response ; + try { + partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), request.getToCount()); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(okHeader).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } catch (PDException e) { + log.error("splitGraphData exception", e); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * Balance data between stores + */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override + public void movePartition(Pdpb.MovePartitionRequest request, + StreamObserver observer) { +======== + public void movePartition(ClusterOp.MovePartitionRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getMovePartitionMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "balanceData", request); + + ClusterOp.MovePartitionResponse response = null; + try { + if (request.getMode() == ClusterOp.OperationMode.Auto) { + taskService.patrolPartitions(); + taskService.balancePartitionShard(request.getStoreGroupId()); + } else { + for (ClusterOp.MovePartitionParam p : request.getParamList()) { + partitionService.movePartitionsShard(p.getPartitionId(), p.getSrcStoreId(), p.getDstStoreId()); + } + } + response = ClusterOp.MovePartitionResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("transferData exception", e); + response = ClusterOp.MovePartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + 
observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Obtain the cluster health status
+     * 
+ */ + @Override + public void getClusterStats(Pdpb.GetClusterStatsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetClusterStatsMethod(), request, observer); + return; + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + Pdpb.GetClusterStatsResponse response = null; + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) + .setCluster(storeNodeService.getClusterStats()) + .build(); +======== + Pdpb.GetClusterStatsResponse response; + + try { + Metapb.ClusterStats state; + if (request.getStoreId() != 0) { + state = storeNodeService.getClusterStats(request.getStoreId()); + } else { + state = storeNodeService.getClusterStats(request.getStoreGroup()); + } + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) + .setCluster(state) + .build(); + } catch (PDException e) { + log.error("getClusterStats exception :", e); + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + observer.onNext(response); + observer.onCompleted(); + } + + /** + *
+     * Report the results of tasks such as partition splitting
+     * 
+ */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override + public void reportTask(Pdpb.ReportTaskRequest request, + io.grpc.stub.StreamObserver observer) { +======== + public void reportTask(ClusterOp.ReportTaskRequest request, + io.grpc.stub.StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getReportTaskMethod(), request, observer); + return; + } + try { + taskService.reportTask(request.getTask()); + } catch (Exception e) { + log.error("PDService.reportTask", e); + } + ClusterOp.ReportTaskResponse response = null; + response = ClusterOp.ReportTaskResponse.newBuilder().setHeader(okHeader).build(); + observer.onNext(response); + observer.onCompleted(); + } + + /** + * + */ + @Override + public void getPartitionStats(Pdpb.GetPartitionStatsRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionStatsMethod(), request, observer); + return; + } + Pdpb.GetPartitionStatsResponse response; + // TODO + try { + Metapb.PartitionStats stats = partitionService.getPartitionStats(request.getGraphName(), + request.getPartitionId()); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(okHeader) + .setPartitionStats(stats).build(); + } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("getPartitionStats exception {}", e); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e)) + .build(); +======== + log.error("getPartitionStats exception ", e); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + + observer.onNext(response); 
+ observer.onCompleted(); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + + @Override + public boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + //private > void redirectToLeader( + // MethodDescriptor method, ReqT req, io.grpc.stub.StreamObserver + // observer) { + // try { + // var addr = RaftEngine.getInstance().getLeaderGrpcAddress(); + // ManagedChannel channel; + // + // if ((channel = channelMap.get(addr)) == null) { + // synchronized (this) { + // if ((channel = channelMap.get(addr)) == null|| channel.isShutdown()) { + // channel = ManagedChannelBuilder + // .forTarget(addr).usePlaintext() + // .build(); + // } + // } + // log.info("Grpc get leader address {}", RaftEngine.getInstance() + // .getLeaderGrpcAddress()); + // } + // + // io.grpc.stub.ClientCalls.asyncUnaryCall(channel.newCall(method, CallOptions + // .DEFAULT), req, + // observer); + // } catch (Exception e) { + // e.printStackTrace(); + // } + //} + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + /** + * Renewal peerList + */ + @Override + public void changePeerList(ClusterOp.ChangePeerListRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getChangePeerListMethod(), request, observer); + return; + } + ClusterOp.ChangePeerListResponse response; + try { + Status status = RaftEngine.getInstance().changePeerList(request.getPeerList()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + Pdpb.ResponseHeader responseHeader = + status.isOk() ? 
okHeader : newErrorHeader(status.getCode(), + status.getErrorMsg()); + response = + Pdpb.getChangePeerListResponse.newBuilder().setHeader(responseHeader).build(); + + } catch (Exception e) { + log.error("changePeerList exception: ", e); + response = Pdpb.getChangePeerListResponse.newBuilder() + .setHeader(newErrorHeader(-1, e.getMessage())) + .build(); +======== + ResponseHeader responseHeader = status.isOk() ? okHeader : getResponseHeader(status.getCode(), + status.getErrorMsg()); + response = ClusterOp.ChangePeerListResponse.newBuilder().setHeader(responseHeader).build(); + + } catch (Exception e) { + log.error("changePeerList exception: ", e); + response = ClusterOp.ChangePeerListResponse.newBuilder() + .setHeader(getResponseHeader(-1, e.getMessage())).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public synchronized void onRaftLeaderChanged() { + log.info("onLeaderChanged"); + // channel = null; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + // TODO: uncomment later + //if (licenseVerifierService == null) { + // licenseVerifierService = new LicenseVerifierService(pdConfig); + //} + //licenseVerifierService.init(); + + try { + PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_PD_LEADER_CHANGE, + RaftEngine.getInstance().getLeaderGrpcAddress(), 0L); + } catch (ExecutionException | InterruptedException e) { +======== + if (licenseVerifierService == null) { + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + licenseVerifierService.init(); + + try { + PDPulseSubjects.notifyNodeChange(StoreNodeEventType.STORE_NODE_EVENT_TYPE_PD_LEADER_CHANGE, + RaftEngine.getInstance().getLeaderGrpcAddress(), 0L); + } catch (Exception e) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("failed to notice client", e); + } + } + + @Override + public void balanceLeaders(ClusterOp.BalanceLeadersRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getBalanceLeadersMethod(), request, observer); + return; + } + logService.insertLog(LogService.PARTITION_CHANGE, "balanceLeaders", request); + ClusterOp.BalanceLeadersResponse response = null; + try { + taskService.balancePartitionLeader(true); + response = ClusterOp.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("balance Leaders exception: ", e); + response = + Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + log.error("balance Leaders exception ", e); + response = ClusterOp.BalanceLeadersResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + observer.onNext(response); + observer.onCompleted(); + } + + // TODO: keep it now & clean it later + @Override + public void putLicense(PutLicenseRequest request, + StreamObserver responseObserver) { + PutLicenseResponse response = null; + boolean moved = false; + String bakPath = pdConfig.getLicensePath() + "-bak"; + File bakFile = new File(bakPath); + File licenseFile = new File(pdConfig.getLicensePath()); + try { + byte[] content = request.getContent().toByteArray(); + if (licenseFile.exists()) { + if (bakFile.exists()) { + FileUtils.deleteQuietly(bakFile); + } + FileUtils.moveFile(licenseFile, bakFile); + moved = true; + } + FileUtils.writeByteArrayToFile(licenseFile, content, false); + } catch (Exception e) { +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("putLicense with error:", e); +======== + log.error("putLicense with error: ", e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (moved) { + try { + FileUtils.moveFile(bakFile, licenseFile); + } catch (IOException ex) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + log.error("failed to restore the license file:", ex); + } + } + Pdpb.ResponseHeader header = + newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); +======== + log.error("failed to restore the license file.", ex); + } + } + ResponseHeader header = getResponseHeader(ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = Pdpb.PutLicenseResponse.newBuilder().setHeader(header).build(); + } + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void delStore(Pdpb.DetStoreRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDelStoreMethod(), request, observer); + return; + } + long storeId = request.getStoreId(); + Pdpb.DetStoreResponse response = null; + try { + Metapb.Store store = storeNodeService.getStore(storeId); + if (Metapb.StoreState.Tombstone == store.getState()) { + storeNodeService.removeStore(storeId); + response = Pdpb.DetStoreResponse.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + .setHeader(okHeader) + .setStore(store) + .build(); + } else { + throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DELETION_VALUE, + "the store can't be deleted, please check store state!"); +======== + .setHeader(okHeader) + .setStore(store) + .build(); + }else{ + throw new 
PDException(ErrorType.STORE_PROHIBIT_DELETION_VALUE, + "the store can't be deleted, please check store state!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + } catch (PDException e) { + log.error("delete store exception:", e); + response = Pdpb.DetStoreResponse.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + .setHeader(newErrorHeader(e)).build(); +======== + .setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + observer.onNext(response); + observer.onCompleted(); + } + + /** + * check the shard whether exceed the cluster's max shard group count + * + * @param newShardCount new shard count + * @return true if can be set to new shard count, otherwise false + */ + private boolean checkShardCount(int newShardCount) { + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + var maxCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeNodeService.getActiveStores().size() / + pdConfig.getConfigService().getPartitionCount(); + + if (newShardCount > maxCount) { + log.error("new shard count :{} exceed current cluster max shard count {}", + newShardCount, maxCount); + return false; +======== + var storeGroups = configService.getAllStoreGroup(); + var maxStoreShardCount = pdConfig.getPartition().getMaxShardsPerStore(); + // 检查每个分组是否可以容纳新分片数量 + for (var storeGroup : storeGroups) { + // 每个分组最大允许的shard数量 + int maxCount = storeNodeService.getActiveStoresByStoreGroup(storeGroup.getGroupId()).size() * + maxStoreShardCount / configService.getPartitionCount(storeGroup.getGroupId()); + + if (newShardCount > maxCount) { + log.error("new shard count :{} exceed current cluster max shard count {}", newShardCount, maxCount); + return false; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 
4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + } catch (Exception e) { + log.error("checkShardCount: {}", e.getMessage()); + } + return true; + } + + /** + * Check that the store resources are sufficient + */ + public boolean isResourceEnough(int oldShardCount, int newShardCount) { + // Whether the resources of the active store are sufficient + try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + // The multiple of the storage space occupied + float expansionRatio = newShardCount / oldShardCount; + // The space currently occupied +======== + + double expansionRatio = newShardCount * 1.0 / oldShardCount; // 占用的存储空间膨胀的倍数 + // 当前占用的空间 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + long currentDataSize = 0L; + // The space occupied after data bloat + long newDataSize = 0L; + // Total free space + long totalAvaible = 0L; + // Statistics on the current storage space + for (Metapb.Store store : storeNodeService.getStores()) { + List graphStatsList = store.getStats().getGraphStatsList(); + for (GraphStats graphStats : graphStatsList) { + currentDataSize += graphStats.getApproximateSize(); + } + } + // Estimate the storage space consumed after data bloat + newDataSize = (long) Math.ceil(currentDataSize * expansionRatio); + // Count the available space in all active stores + List activeStores = storeNodeService.getActiveStores(); + for (Metapb.Store store : activeStores) { + Metapb.StoreStats storeStats = store.getStats(); + totalAvaible += storeStats.getAvailable(); + } + // Consider whether resources are available when partitions are evenly distributed + return totalAvaible > newDataSize - currentDataSize; + } catch (PDException e) { + e.printStackTrace(); + return false; + } + } + + /** + *
+     * Compaction on rocksdb
+     * 
+ */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override + public void dbCompaction(Pdpb.DbCompactionRequest request, + StreamObserver observer) { +======== + public void dbCompaction(ClusterOp.DbCompactionRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getDbCompactionMethod(), request, observer); + return; + } + logService.insertLog(LogService.TASK, "dbCompaction", request); + ClusterOp.DbCompactionResponse response = null; + try { + log.info("dbCompaction call dbCompaction"); + taskService.dbCompaction(request.getTableName()); + response = ClusterOp.DbCompactionResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("dbCompaction exception", e); + response = ClusterOp.DbCompactionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void combineCluster(ClusterOp.CombineClusterRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCombineClusterMethod(), request, observer); + return; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + Pdpb.CombineClusterResponse response; + + try { + partitionService.combinePartition(request.getToCount()); + response = Pdpb.CombineClusterResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = + Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + ClusterOp.CombineClusterResponse response ; + + try{ + partitionService.combinePartition(request.getStoreGroupId(), request.getToCount()); + response = ClusterOp.CombineClusterResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e){ + response = 
ClusterOp.CombineClusterResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Deprecated + @Override + public void combineGraph(ClusterOp.CombineGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCombineGraphMethod(), request, observer); + return; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + Pdpb.CombineGraphResponse response; +======== + ClusterOp.CombineGraphResponse response ; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + + try { + partitionService.combineGraphPartition(request.getGraphName(), request.getToCount()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = Pdpb.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + response = Pdpb.CombineGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = ClusterOp.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e){ + response = ClusterOp.CombineGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getDeleteShardGroupMethod(), request, observer); + return; + } + + Pdpb.DeleteShardGroupResponse response; + + try { + storeNodeService.deleteShardGroup(request.getGroupId()); + response = 
Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = + Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getShardGroup(Pdpb.GetShardGroupRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetShardGroupMethod(), request, observer); + return; + } + Pdpb.GetShardGroupResponse response; + + try { + Metapb.ShardGroup shardGroup = storeNodeService.getShardGroup(request.getGroupId()); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader) + .setShardGroup(shardGroup).build(); + } catch (PDException e) { + log.error("getShardGroup exception", e); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, + StreamObserver responseObserver) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateShardGroupMethod(), request, responseObserver); + return; + } + Pdpb.UpdateShardGroupResponse response; + + try { + var group = request.getShardGroup(); + storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + group.getVersion(), group.getConfVer()); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("updateShardGroup exception, ", e); + 
response = + Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + group.getVersion(), group.getConfVer()); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("updateShardGroup exception, ", e); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + public void updateShardGroupOp(Pdpb.ChangeShardRequest request, + StreamObserver observer) { +======== + public void updateShardGroupOp(ClusterOp.ChangeShardRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateShardGroupOpMethod(), request, observer); + return; + } + + ClusterOp.ChangeShardResponse response; + + try { + storeNodeService.shardGroupOp(request.getGroupId(), request.getShardsList()); + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("changeShard exception, ", e); + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + public void changeShard(Pdpb.ChangeShardRequest request, + StreamObserver observer) { +======== + public void changeShard(ClusterOp.ChangeShardRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getChangeShardMethod(), request, observer); + return; + } + + ClusterOp.ChangeShardResponse response; + + try { + partitionService.changeShard(request.getGroupId(), request.getShardsList()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); +======== + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } catch (PDException e) { + log.error("changeShard exception, ", e); + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + @Override + public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, + StreamObserver observer) { +======== + public void updatePdRaft(ClusterOp.UpdatePdRaftRequest request, + StreamObserver observer){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdatePdRaftMethod(), request, observer); + return; + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + var list = parseConfig(request.getConfig()); + + log.info("update raft request: {}, list: {}", request.getConfig(), list); + + Pdpb.UpdatePdRaftResponse response = + Pdpb.UpdatePdRaftResponse.newBuilder().setHeader(okHeader).build(); + + do { + var leaders = list.stream().filter(s -> s.getKey().equals("leader")) + .collect(Collectors.toList()); +======== + var list = PeerUtil.parseConfig(request.getConfig()); + 
+ log.info("update raft request: {}, list: {}", request.getConfig(), list); + + ClusterOp.UpdatePdRaftResponse response = ClusterOp.UpdatePdRaftResponse.newBuilder() + .setHeader(okHeader).build(); + + do { + var leaders = list.stream().filter(s -> s.getKey().equals("leader")).collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + var node = RaftEngine.getInstance().getRaftNode(); + + if (leaders.size() == 1) { + var leaderPeer = leaders.get(0).getValue(); + // change leader + var peers = new HashSet<>(node.listPeers()); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (!peerEquals(leaderPeer, node.getLeaderId())) { +======== + if (!PeerUtil.isPeerEquals(leaderPeer, node.getLeaderId())) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + if (peers.contains(leaderPeer)) { + log.info("updatePdRaft, transfer to {}", leaderPeer); + node.transferLeadershipTo(leaderPeer); + } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = Pdpb.UpdatePdRaftResponse.newBuilder() + .setHeader(newErrorHeader(6667, + "new leader" + + " not in " + + "raft peers")) + .build(); +======== + response = ClusterOp.UpdatePdRaftResponse.newBuilder() + .setHeader(getResponseHeader(6667, "new leader not in raft peers")) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + break; + } + } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = Pdpb.UpdatePdRaftResponse.newBuilder() + .setHeader(newErrorHeader(6666, + "leader size != 1")) + .build(); +======== + response = ClusterOp.UpdatePdRaftResponse.newBuilder() + .setHeader(getResponseHeader(6666, "leader size != 
1")).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + break; + } + + Configuration config = new Configuration(); + // add peer + for (var peer : list) { + if (!peer.getKey().equals("learner")) { + config.addPeer(peer.getValue()); + } else { + config.addLearner(peer.getValue()); + } + } + + log.info("pd raft update with new config: {}", config); + + node.changePeers(config, status -> { + if (status.isOk()) { + log.info("updatePdRaft, change peers success"); + } else { + log.error("changePeers status: {}, msg:{}, code: {}, raft error:{}", +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + status, status.getErrorMsg(), status.getCode(), + status.getRaftError()); +======== + status, status.getErrorMsg(), status.getCode(), status.getRaftError()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + }); + } while (false); + + observer.onNext(response); + observer.onCompleted(); + } + + public void getCache(GetGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetCacheMethod(), request, observer); + return; + } + CacheResponse response; + try { + response = CacheResponse.newBuilder().mergeFrom(storeNodeService.getCache()) + .setHeader(okHeader).build(); + } catch (PDException e) { + log.error("get cache exception, ", e); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + response = CacheResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = CacheResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + } + observer.onNext(response); + observer.onCompleted(); + } + + public void getPartitions(GetGraphRequest request, + 
StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetPartitionsMethod(), request, observer); + return; + } + CachePartitionResponse response; + List partitions = partitionService.getPartitions(request.getGraphName()); + response = CachePartitionResponse.newBuilder().addAllPartitions(partitions) + .setHeader(okHeader).build(); + observer.onNext(response); + observer.onCompleted(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java + private List> parseConfig(String conf) { + List> result = new LinkedList<>(); + + if (conf != null && conf.length() > 0) { + for (var s : conf.split(",")) { + if (s.endsWith("/leader")) { + result.add(new KVPair<>("leader", + JRaftUtils.getPeerId(s.substring(0, s.length() - 7)))); + } else if (s.endsWith("/learner")) { + result.add(new KVPair<>("learner", + JRaftUtils.getPeerId(s.substring(0, s.length() - 8)))); + } else if (s.endsWith("/follower")) { + result.add(new KVPair<>("follower", + JRaftUtils.getPeerId(s.substring(0, s.length() - 9)))); + } else { + result.add(new KVPair<>("follower", JRaftUtils.getPeerId(s))); + } + } + } + + return result; + } + + private boolean peerEquals(PeerId p1, PeerId p2) { + if (p1 == null && p2 == null) { + return true; + } + if (p1 == null || p2 == null) { + return false; + } + return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); + } +======== + + @Override + public void submitIndexTask(Pdpb.IndexTaskCreateRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSubmitIndexTaskMethod(), request, observer); + return; + } + + var builder = Pdpb.TaskQueryResponse.newBuilder(); + var param = request.getParam(); + try { + var partitions = partitionService.getPartitions(param.getGraph()); + + if (partitions.isEmpty()) { + throw new PDException(PARTITION_NOT_EXISTS, "graph has no partition"); + } + + var newTaskId = 
idService.getId(USER_TASK_ID_KEY, 1); + + var taskInfo = storeNodeService.getTaskInfoMeta(); + for (var partition : partitions) { + var buildIndex = Metapb.BuildIndex.newBuilder() + .setPartitionId(partition.getId()) + .setTaskId(newTaskId) + .setParam(param) + .build(); + + var task = MetaTask.Task.newBuilder() + .setId(newTaskId) + .setType(MetaTask.TaskType.Build_Index) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setBuildIndex(buildIndex) + .build(); + + taskInfo.updateUserTask(task); + + log.info("notify client build index task: {}", buildIndex); + + PDPulseSubjects.notifyClient(PartitionHeartbeatResponse.newBuilder() + .setPartition(partition) + // 给store的task id + .setId(newTaskId) + .setBuildIndex(buildIndex)); + } + observer.onNext(builder.setHeader(okHeader).setTaskId(newTaskId).build()); + } catch (PDException e) { + log.error("IndexTaskGrpcService.submitTask", e); + observer.onNext(builder.setHeader(getResponseHeader(e)).build()); + } + observer.onCompleted(); + } + + @Override + public void submitBackupGraphTask(Pdpb.BackupGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSubmitBackupGraphTaskMethod(), request, observer); + return; + } + + var builder = Pdpb.TaskQueryResponse.newBuilder(); + + try { + + var sourceGraph = partitionService.getGraph(request.getGraphName()); + var targetGraph = partitionService.getGraph(request.getTargetGraphName()); + + if (sourceGraph == null || targetGraph == null) { + throw new PDException(GRAPH_NOT_EXISTS, "source or target graph not exists"); + } + + var partitions = partitionService.getPartitions(request.getGraphName()); + var targetPartitions = partitionService.getPartitions(request.getTargetGraphName()); + + if (partitions.isEmpty()) { + throw new PDException(PARTITION_NOT_EXISTS, "source graph has no partition"); + } + + if (targetPartitions.isEmpty()) { + 
partitionService.allocGraphPartitions(targetGraph); + targetPartitions = partitionService.getPartitions(request.getTargetGraphName()); + } + + var newTaskId = idService.getId(USER_TASK_ID_KEY, 1); + + var taskInfo = storeNodeService.getTaskInfoMeta(); + + for (var partition : partitions) { + + SplitPartition.Builder splitBuilder = SplitPartition.newBuilder().addAllNewPartition(targetPartitions); + + var task = MetaTask.Task.newBuilder() + .setId(newTaskId) + .setType(MetaTask.TaskType.Backup_Graph) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setSplitPartition(splitBuilder.build()) + .build(); + + taskInfo.updateUserTask(task); + + log.info("notify client backup graph: {} - {}", sourceGraph.getGraphName(), partition.getId()); + + PDPulseSubjects.notifyClient(PartitionHeartbeatResponse.newBuilder() + .setPartition(partition) + // 给store的task id + .setId(newTaskId) + .setSplitPartition(splitBuilder.build())); + } + + observer.onNext(builder.setHeader(okHeader).setTaskId(newTaskId).build()); + } catch (PDException e) { + log.error("IndexTaskGrpcService.submitTask", e); + observer.onNext(builder.setHeader(getResponseHeader(e)).build()); + } + observer.onCompleted(); + } + + @Override + public void queryTaskState(Pdpb.TaskQueryRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getQueryTaskStateMethod(), request, observer); + return; + } + + var taskInfo = storeNodeService.getTaskInfoMeta(); + var builder = Pdpb.TaskQueryResponse.newBuilder(); + + try { + var tasks = taskInfo.scanUserTask(request.getTaskId()); + + if (tasks.isEmpty()) { + throw new PDException(TASK_NOT_EXISTS, "task not found"); + } else { + var state = MetaTask.TaskState.Task_Success; + String message = "OK"; + int countOfSuccess = 0; + int countOfDoing = 0; + + for (var task : tasks) { + var state0 = task.getState(); + if (state0 == MetaTask.TaskState.Task_Failure) { + state = 
MetaTask.TaskState.Task_Failure; + message = task.getMessage(); + break; + } else if (state0 == MetaTask.TaskState.Task_Doing) { + state = MetaTask.TaskState.Task_Doing; + countOfDoing ++; + } else if (state0 == MetaTask.TaskState.Task_Success) { + countOfSuccess ++; + } + } + + if (state == MetaTask.TaskState.Task_Doing) { + message = "Doing/" + countOfDoing + ", Success/" + countOfSuccess; + } + + builder.setHeader(okHeader).setState(state).setMessage(message); + } + } catch (PDException e) { + builder.setHeader(getResponseHeader(e)); + } + + observer.onNext(builder.build()); + observer.onCompleted(); + } + + @Override + public void retryTask(Pdpb.TaskQueryRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getRetryTaskMethod(), request, observer); + return; + } + + var taskInfo = storeNodeService.getTaskInfoMeta(); + var builder = Pdpb.TaskQueryResponse.newBuilder(); + var taskId = request.getTaskId(); + + try { + var tasks = taskInfo.scanUserTask(taskId); + + if (tasks.isEmpty()) { + builder.setHeader(okHeader).setState(MetaTask.TaskState.Task_Failure).setMessage("task not found"); + } else { + var state = MetaTask.TaskState.Task_Success; + String message = "OK"; + for (var task : tasks) { + var state0 = task.getState(); + if (state0 == MetaTask.TaskState.Task_Failure || state0 == MetaTask.TaskState.Task_Doing) { + var partition = task.getPartition(); + log.info("notify client retry task: {}", task.getId()); + + var responseBuilder = PartitionHeartbeatResponse.newBuilder() + .setPartition(partition) + .setId(task.getId()); + if (task.hasBuildIndex()) { + responseBuilder.setBuildIndex(task.getBuildIndex()); + } else if (task.hasSplitPartition()) { + responseBuilder.setSplitPartition(task.getSplitPartition()); + } else { + throw new PDException(TASK_NOT_EXISTS, "task type not support"); + } + + PDPulseSubjects.notifyClient(responseBuilder); + } + } + builder.setHeader(okHeader).setState(state).setMessage(message); + } + } 
catch (PDException e) { + builder.setHeader(getResponseHeader(e)); + } + + observer.onNext(builder.build()); + observer.onCompleted(); + } + + @Override + public void getGraphStats(GetGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphStatsMethod(), request, observer); + return; + } + String graphName = request.getGraphName(); + GraphStatsResponse.Builder builder = GraphStatsResponse.newBuilder(); + try { + List stores = storeNodeService.getStores(graphName); + long dataSize = 0; + long keySize = 0; + for (Metapb.Store store : stores) { + List gss = store.getStats().getGraphStatsList(); + if (!gss.isEmpty()) { + String gssGraph = gss.get(0).getGraphName(); + String suffix = "/g"; + if (gssGraph.split("/").length > 2 && !graphName.endsWith(suffix)) { + graphName += suffix; + } + for (GraphStats gs : gss) { + boolean nameEqual = graphName.equals(gs.getGraphName()); + boolean roleEqual = Metapb.ShardRole.Leader.equals(gs.getRole()); + if (nameEqual && roleEqual) { + dataSize += gs.getApproximateSize(); + keySize += gs.getApproximateKeys(); + } + } + } + } + GraphStats stats = GraphStats.newBuilder().setApproximateSize(dataSize) + .setApproximateKeys(keySize).setGraphName(request.getGraphName()) + .build(); + builder.setStats(stats); + } catch (PDException e) { + builder.setHeader(getResponseHeader(e)); + } + observer.onNext(builder.build()); + observer.onCompleted(); + } + + @Override + public void getMembersAndClusterState(Pdpb.GetMembersRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetMembersAndClusterStateMethod(), request, observer); + return; + } + Pdpb.MembersAndClusterState response; + try { + var stateList= storeNodeService.getAllClusterStats() + .entrySet().stream().map(entry -> Metapb.GroupClusterState.newBuilder() + .setStoreGroup(entry.getKey()) + .setState(entry.getValue()).build()) + .collect(Collectors.toList()); + + 
response = Pdpb.MembersAndClusterState.newBuilder() + .addAllMembers(RaftEngine.getInstance().getMembers()) + .setLeader(RaftEngine.getInstance().getLocalMember()) + .addAllState(stateList) + // .setState(storeNodeService.getClusterStats().getState()) + .build(); + + } catch (Exception e) { + log.error("getMembers exception: ", e); + response = Pdpb.MembersAndClusterState.newBuilder().setHeader(getResponseHeader(-1, e.getMessage())) + .build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void createGraph(Pdpb.CreateGraphRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCreateGraphMethod(), request, observer); + return; + } + + Pdpb.CreateGraphResponse response; + Metapb.Graph graph = request.getGraph(); + try { + graph = partitionService.createGraph(graph.getGraphName(), + graph.getPartitionCount(), graph.getStoreGroupId()); + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + } catch (PDException e) { + log.error("create exception: ", e); + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void createStoreGroup(StoreGroup.CreateStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCreateStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.CreateStoreGroupResponse response; + + try { + var storeGroup = configService.getStoreGroup(request.getGroupId()); + if (storeGroup == null) { + storeGroup = configService.createStoreGroup(request.getGroupId(), + request.getName(), request.getPartitionCount()); + storeNodeService.updateClusterStatus(request.getGroupId(), Metapb.ClusterState.Cluster_Not_Ready); + } else { + throw new PDException(STORE_GROUP_NOT_EXISTS.getNumber(), "Store Group exists"); + } + + response = 
StoreGroup.CreateStoreGroupResponse.newBuilder() + .setHeader(okHeader).setStoreGroup(storeGroup).build(); + } catch (PDException e) { + response = StoreGroup.CreateStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getStoreGroup(StoreGroup.GetStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.GetStoreGroupResponse response; + try { + var storeGroup = configService.getStoreGroup(request.getGroupId()); + if (storeGroup == null) { + throw new PDException(STORE_GROUP_NOT_EXISTS); + } + response = StoreGroup.GetStoreGroupResponse.newBuilder().setHeader(okHeader) + .setStoreGroup(storeGroup).build(); + } catch (PDException e) { + response = StoreGroup.GetStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getAllStoreGroup(StoreGroup.GetAllStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.GetAllStoreGroupResponse response; + + try { + var groupStoreList = configService.getAllStoreGroup(); + response = StoreGroup.GetAllStoreGroupResponse.newBuilder() + .setHeader(okHeader).addAllStoreGroups(groupStoreList).build(); + } catch (PDException e) { + response = StoreGroup.GetAllStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateStoreGroup(StoreGroup.UpdateStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.UpdateStoreGroupResponse response; + + try { + var 
storeGroup = configService.updateStoreGroup(request.getGroupId(), request.getName()); + response = StoreGroup.UpdateStoreGroupResponse.newBuilder() + .setHeader(okHeader).setStoreGroup(storeGroup).build(); + } catch (PDException e) { + response = StoreGroup.UpdateStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getStoresByStoreGroup(StoreGroup.GetGroupStoresRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoresByStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.GetGroupStoresResponse response; + + try { + var stores = storeNodeService.getStoresByStoreGroup(request.getStoreGroupId()); + response = StoreGroup.GetGroupStoresResponse.newBuilder().addAllStores(stores).build(); + } catch (PDException e) { + response = StoreGroup.GetGroupStoresResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateStoreGroupRelation(StoreGroup.UpdateStoreGroupRelationRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateStoreGroupRelationMethod(), request, observer); + return; + } + + StoreGroup.UpdateStoreGroupRelationResponse response; + + try { + // 没做过初始化 或者初始化过但是没有shard group的分配 + if (! 
storeNodeService.isStoreHasStoreGroup(request.getStoreId()) || storeNodeService.getShardGroups() + .stream().noneMatch(shardGroup -> { + for (var shard : shardGroup.getShardsList()) { + if (shard.getStoreId() == request.getStoreId()) { + return true; + } + } + return false; })) { + storeNodeService.updateStoreGroupRelation(request.getStoreId(), request.getStoreGroupId()); + response = StoreGroup.UpdateStoreGroupRelationResponse.newBuilder().setHeader(okHeader) + .setSuccess(true).setMessage("").build(); + } else { + response = StoreGroup.UpdateStoreGroupRelationResponse.newBuilder() + .setHeader(getResponseHeader(-1, "store has partitions yet")).build(); + } + } catch (PDException e) { + response = StoreGroup.UpdateStoreGroupRelationResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getLeaderGrpcAddress(NoArg request, + StreamObserver observer) { + GetLeaderGrpcAddressResponse.Builder response = GetLeaderGrpcAddressResponse.newBuilder(); + try { + String grpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(false); + response.setHeader(okHeader).setAddress(grpcAddress); + } catch (PDException e) { + response.setHeader(getResponseHeader(e)); + } + observer.onNext(response.build()); + observer.onCompleted(); + } + + /** + */ + @Override + public void clearGrpcAddressCache(NoArg request, StreamObserver observer) { + VoidResponse.Builder response = VoidResponse.newBuilder(); + try { + RaftEngine.getInstance().clearGrpcAddresses(); + response.setHeader(okHeader); + } catch (Exception e) { + response.setHeader(getResponseHeader(ErrorType.ERROR_VALUE, e.getMessage())); + } + observer.onNext(response.build()); + observer.onCompleted(); + } + + @Override + public void getAllGrpcAddresses(NoArg request, + StreamObserver observer) { + boolean allows = pdConfig.isAllowsAddressAcquisition(); + Pdpb.GetAllGrpcAddressesResponse.Builder builder = + 
Pdpb.GetAllGrpcAddressesResponse.newBuilder().setAllowed(allows); + try { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllGrpcAddressesMethod(), request, observer); + return; + } + if (allows) { + List grpcAddresses = RaftEngine.getInstance().getPeerGrpcAddressesByCache(); + builder.addAllAddresses(grpcAddresses).setHeader(okHeader); + } + } catch (Exception e) { + log.error("getAllGrpcAddresses error", e); + builder.setHeader(getResponseHeader(ErrorType.ERROR_VALUE, e.getMessage())); + } + observer.onNext(builder.build()); + observer.onCompleted(); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/SDConfigService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/SDConfigService.java new file mode 100644 index 0000000000..ad2ae4a528 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/SDConfigService.java @@ -0,0 +1,250 @@ +package org.apache.hugegraph.pd.service; + +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.apache.commons.lang3.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import org.apache.hugegraph.pd.RegistryService; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import 
org.apache.hugegraph.pd.model.SDConfig; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import org.apache.hugegraph.pd.util.HgMapCache; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/24 + * service discovery config for prometheus + */ +@Service +@Slf4j +public class SDConfigService { + + private static String defaultPath = "/actuator/prometheus"; + private final SDConfig pdModel = SDConfig.of() + .addLabel("__app_name", "pd") + .setScheme("http") + .setMetricsPath("/actuator/prometheus"); + private final SDConfig storeModel = SDConfig.of() + .addLabel("__app_name", "store") + .setScheme("http") + .setMetricsPath("/actuator/prometheus"); + @Autowired + private PDConfig pdConfig; + @Autowired + private PDService pdService; + private RegistryService register; + private HgMapCache> targetsCache = HgMapCache.expiredOf(24 * 60 * 60 * 1000); + + private RegistryService getRegister() { + if (this.register == null) { + this.register = new RegistryService(this.pdConfig); + } + return this.register; + } + + public List getAllTargets() { + List res = new LinkedList<>(); + List buf = this.toModels(this.getRegister().getNodes(Query.newBuilder().build())); + if (buf != null) { + res.addAll(buf); + } + res.add(getPdTargets()); + res.add(getStoreTargets()); + return res; + } + + /** + * @param appName + * @return null if it's not existing + */ + public List getTargets(String appName) { + HgAssert.isArgumentNotNull(appName, "appName"); + switch (appName) { + case "pd": + return Collections.singletonList(this.getPdTargets()); + case "store": + return Collections.singletonList(this.getStoreTargets()); + default: + return this.toModels( + this.getRegister().getNodes(Query.newBuilder().setAppName(appName).build())); + } + } + + private SDConfig getPdTargets() { + return setTargets(pdModel, () -> this.mergeCache("pd", getPdAddresses())); + } + + private SDConfig getStoreTargets() { + return setTargets(storeModel, () -> 
this.mergeCache("store", getStoreAddresses())); + } + + private SDConfig setTargets(SDConfig model, Supplier> supplier) { + return model.setTargets(supplier.get()).setClusterId(String.valueOf(pdConfig.getClusterId())); + } + + private Set mergeCache(String key, Set set) { + Set buf = this.targetsCache.get(key); + + if (buf == null) { + buf = new HashSet<>(); + this.targetsCache.put(key, buf); + } + + if (set != null) { + buf.addAll(set); + } + + return buf; + } + + private List toModels(NodeInfos info) { + if (info == null) { + return null; + } + List nodes = info.getInfoList(); + if (nodes == null || nodes.isEmpty()) { + return null; + } + List res = + nodes.stream().map(e -> { + Map labels = e.getLabelsMap(); + String target = labels.get("target"); + if (HgAssert.isInvalid(target)) { + return null; + } + SDConfig model = SDConfig.of(); + model.addTarget(target); + model.addLabel("__app_name", e.getAppName()); + labels.forEach((k, v) -> { + k = k.trim(); + switch (k) { + case "metrics": + model.setMetricsPath(v.trim()); + break; + case "scheme": + model.setScheme(v.trim()); + break; + default: + if (k.startsWith("__")) { + model.addLabel(k, v); + } + } + }); + return model; + }) + .filter(e -> e != null) + .collect(Collectors.toList()); + + if (res.isEmpty()) { + return null; + } + return res; + } + + private Set getPdAddresses() { + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + List members = null; + try { + members = response.get().get(0).getMembersList(); + } catch (Throwable e) { + log.error("Failed to get all pd members.", e); + } + Set res = new HashSet<>(); + if (members != null) { + members.stream().forEach(e -> res.add(e.getRestUrl())); + } + return res; + } + + private Set getStoreAddresses() { + Set res = new HashSet<>(); + List stores = null; + try { + stores = pdService.getStoreNodeService().getStores(); + } catch (PDException e) { + log.error("Failed 
to get all stores.", e); + } + if (stores != null) { + stores.stream().forEach(e -> { + String buf = getRestAddress(e); + if (buf != null) { + res.add(buf); + } + }); + } + return res; + } + + // TODO: optimized store registry data, to add host:port of REST server. + private String getRestAddress(Metapb.Store store) { + String address = store.getAddress(); + if (address == null || address.isEmpty()) { + return null; + } + try { + Optional port = store.getLabelsList().stream().map( + e -> { + if ("rest.port".equals(e.getKey())) { + return e.getValue(); + } + return null; + }).filter(e -> e != null).findFirst(); + + if (port.isPresent()) { + address = address.substring(0, address.indexOf(':') + 1); + address = address + port.get(); + + } + } catch (Throwable t) { + log.error("Failed to extract the REST address of store, cause by:", t); + } + return address; + + } + + public List getConfigs(String appName, String path) { + HgAssert.isArgumentNotNull(appName, "appName"); + SDConfig config; + switch (appName) { + case "pd": + config = getPdConfig(appName, path); + config.setTargets(mergeCache(appName, getPdAddresses())); + return Collections.singletonList(config); + case "store": + config = getPdConfig(appName, path); + config.setTargets(mergeCache(appName, getStoreAddresses())); + return Collections.singletonList(config); + default: + return toModels(getRegister().getNodes(Query.newBuilder().setAppName(appName).build())); + } + } + + public SDConfig getPdConfig(String appName, String path) { + SDConfig config = SDConfig.of().addLabel("__app_name", appName).setScheme("http"); + if (StringUtils.isEmpty(path)) { + config.setMetricsPath(defaultPath); + } else { + config.setMetricsPath(path); + } + return config; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java new file mode 100644 index 0000000000..d7028c3b43 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/ServiceGrpc.java @@ -0,0 +1,102 @@ +package org.apache.hugegraph.pd.service; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import org.apache.hugegraph.pd.service.interceptor.RedirectInterceptor; + +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; +import io.grpc.stub.StreamObserver; + +/** + * @author zhangyingjie + * @date 2022/6/21 + **/ +public interface ServiceGrpc extends RaftStateListener { + + ConcurrentHashMap channels = new ConcurrentHashMap(); + ManagedChannel channel = null; + Logger log = LoggerFactory.getLogger(ServiceGrpc.class); + + default ResponseHeader getResponseHeader(PDException e) { + Errors.Builder builder = Errors.newBuilder().setTypeValue(e.getErrorCode()); + if (!StringUtils.isEmpty(e.getMessage())) { + builder.setMessage(e.getMessage()); + } + Errors error = builder.build(); + ResponseHeader header = ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default ResponseHeader getResponseHeader() { + Errors error = Errors.newBuilder().setType(ErrorType.OK).build(); + ResponseHeader header = ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default ResponseHeader getResponseHeader(int errorCode, String errorMsg) { + Errors.Builder builder = Errors.newBuilder().setTypeValue(errorCode); + if (!StringUtils.isEmpty(errorMsg)) { + 
builder.setMessage(errorMsg); + } + Errors error = builder.build(); + ResponseHeader header = ResponseHeader.newBuilder().setError(error).build(); + return header; + } + + default boolean isLeader() { + return RaftEngine.getInstance().isLeader(); + } + + default void redirectToLeader(ManagedChannel channel, MethodDescriptor method, + ReqT req, StreamObserver observer) { + try { + String address = RaftEngine.getInstance().getLeaderGrpcAddress(); + if ((channel = channels.get(address)) == null || channel.isTerminated() || channel.isShutdown()) { + synchronized (this) { + if ((channel = channels.get(address)) == null || channel.isTerminated() || + channel.isShutdown()) { + while (channel != null && channel.isShutdown() && !channel.isTerminated()) { + channel.awaitTermination(50, TimeUnit.MILLISECONDS); + } + ManagedChannel c = ManagedChannelBuilder.forTarget(address) + .maxInboundMessageSize(Integer.MAX_VALUE) + .usePlaintext().usePlaintext().build(); + channels.put(address, c); + channel = c; + } + } + } + ClientCall call = new RedirectInterceptor().interceptCall(method, + CallOptions.DEFAULT, + channel); + io.grpc.stub.ClientCalls.asyncUnaryCall(call, req, observer); + } catch (Exception e) { + log.error("Failed to redirect to leader", e); + } + } + + default void redirectToLeader(MethodDescriptor method, ReqT req, + StreamObserver observer) { + redirectToLeader(channel, method, req, observer); + } + + + @Override + default void onRaftLeaderChanged() { + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java new file mode 100644 index 0000000000..f5e16052cf --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/UpgradeService.java @@ -0,0 +1,88 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import 
org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.rest.API; +import org.apache.hugegraph.pd.upgrade.VersionScriptFactory; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class UpgradeService { + + private static final String VERSION_KEY = "DATA_VERSION"; + + private static final String RUN_LOG_PREFIX = "SCRIPT_RUN_LOG"; + + private PDConfig pdConfig; + + private KvService kvService; + + public UpgradeService (PDConfig pdConfig){ + this.pdConfig = pdConfig; + this.kvService = new KvService(pdConfig); + } + + public void upgrade() throws PDException { + + log.info("upgrade service start"); + VersionScriptFactory factory = VersionScriptFactory.getInstance(); + var dataVersion = getDataVersion(); + log.info("now db data version : {}", dataVersion); + for(VersionUpgradeScript script : factory.getScripts()) { + // 执行过,run once的跳过 + if (isExecuted(script.getClass().getName()) && script.isRunOnce()) { + log.info("Script {} is Executed and is run once", script.getClass().getName()); + continue; + } + + // 判断跳过的条件 + if (dataVersion == null && !script.isRunWithoutDataVersion() || dataVersion != null && + !versionCompare(dataVersion, script.getHighVersion(), script.getLowVersion())) { + log.info("Script {} is did not match version requirements, current data version:{}, current version:{}" + + "script run version({} to {}), run without data version:{}", + script.getClass().getName(), + dataVersion, + API.VERSION, + script.getHighVersion(), + script.getLowVersion(), + script.isRunWithoutDataVersion()); + continue; + } + + script.runInstruction(pdConfig); + logRun(script.getClass().getName()); + } + + writeCurrentDataVersion(); + } + + private boolean isExecuted(String className) throws PDException { + var ret = kvService.get(RUN_LOG_PREFIX + "/" + className); + return ret.length() > 0; + } + + private void logRun(String className) throws PDException { + kvService.put(RUN_LOG_PREFIX + "/" + 
className, API.VERSION); + } + + private String getDataVersion() throws PDException { + return kvService.get(VERSION_KEY); + } + + private boolean versionCompare(String dataVersion, String high, String low) { + var currentVersion = API.VERSION; + if (!high.equals(VersionUpgradeScript.UNLIMITED_VERSION) && high.compareTo(dataVersion) < 0 + || !low.equals(VersionUpgradeScript.UNLIMITED_VERSION) && low.compareTo(currentVersion) > 0){ + return false; + } + return true; + } + + private void writeCurrentDataVersion() throws PDException { + log.info("update db version to {}", API.VERSION); + kvService.put(VERSION_KEY, API.VERSION); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java new file mode 100644 index 0000000000..e4bf05652f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/Authentication.java @@ -0,0 +1,107 @@ +package org.apache.hugegraph.pd.service.interceptor; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.function.Function; + +import org.apache.commons.lang3.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.security.access.AccessDeniedException; +import org.springframework.security.authentication.BadCredentialsException; +import org.springframework.stereotype.Component; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.Cache; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.config.Server; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.util.TokenUtil; +import org.apache.hugegraph.util.StringEncoding; + +/** + * @author zhangyingjie + * @date 2023/5/5 + **/ + +@Component +public class Authentication { + + @Autowired + private KvService kvService; + @Autowired + private 
PDConfig pdConfig; + @Autowired + private PDConfig.Servers servers; + + private static final Cache TOKEN_CACHE = new Cache<>(); + private static volatile TokenUtil util; + private static String invalidMsg = "invalid token and invalid user name or password, access denied"; + private static String invalidBasicInfo = "invalid basic authentication info"; + + protected T authenticate(String authority, String token, Function tokenCall) { + try { + if (StringUtils.isEmpty(authority)) { + throw new BadCredentialsException(invalidBasicInfo); + } + byte[] bytes = authority.getBytes(StandardCharsets.UTF_8); + byte[] decode = Base64.getDecoder().decode(bytes); + String info = new String(decode); + int delim = info.indexOf(':'); + if (delim == -1) { + throw new BadCredentialsException(invalidBasicInfo); + } + String name = info.substring(0, delim); + String pwd = info.substring(delim + 1); + if (!"store".equals(name)) { + if (util == null) { + synchronized (this) { + if (util == null) { + util = new TokenUtil(pdConfig.getSecretKey(), servers.getServers()); + } + } + } + Server i = util.getInfo(name); + if (i == null) { + throw new AccessDeniedException("invalid service name"); + } + if (!StringUtils.isEmpty(token)) { + String value = TOKEN_CACHE.get(name); + if (StringUtils.isEmpty(value)) { + synchronized (i) { + value = kvService.get(getTokenKey(name)); + } + } + if (!StringUtils.isEmpty(value) && token.equals(value)) { + return tokenCall.apply(""); + } + } + if (StringUtils.isEmpty(pwd) || !StringEncoding.checkPassword(i.getPwd(), pwd)) { + throw new AccessDeniedException(invalidMsg); + } + token = util.getToken(name); + String tokenKey = getTokenKey(name); + String dbToken = kvService.get(tokenKey); + if (StringUtils.isEmpty(dbToken)) { + synchronized (i) { + dbToken = kvService.get(tokenKey); + if (StringUtils.isEmpty(dbToken) && RaftEngine.getInstance().isLeader()) { + kvService.put(tokenKey, token, + TokenUtil.AUTH_TOKEN_EXPIRE); + TOKEN_CACHE.put(name, token, + 
TokenUtil.AUTH_TOKEN_EXPIRE); + return tokenCall.apply(token); + } + } + } + } + return tokenCall.apply(""); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static String getTokenKey(String name) { + return "PD/TOKEN/" + name; + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/GrpcAuthentication.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/GrpcAuthentication.java new file mode 100644 index 0000000000..24bf8688c7 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/GrpcAuthentication.java @@ -0,0 +1,62 @@ +package org.apache.hugegraph.pd.service.interceptor; + +import java.util.function.Function; + +import org.apache.commons.lang3.StringUtils; +import org.apache.hugegraph.pd.service.ServiceGrpc; +import org.springframework.stereotype.Service; + +import org.apache.hugegraph.pd.common.Consts; +import org.apache.hugegraph.pd.raft.RaftEngine; + +import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.ServerCall.Listener; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; +import io.grpc.Status; + +/** + * @author zhangyingjie + * @date 2023/4/23 + **/ +@Service +public class GrpcAuthentication extends Authentication implements ServerInterceptor, ServiceGrpc { + + @Override + public Listener interceptCall( + ServerCall call, Metadata headers, + ServerCallHandler next) { + try { + String authority = headers.get(Consts.CREDENTIAL_KEY); + String token = headers.get(Consts.TOKEN_KEY); + Function> tokenCall = t -> { + ServerCall sc = new SimpleForwardingServerCall(call) { + @Override + public void sendHeaders(Metadata headers) { + if (!StringUtils.isEmpty(t)) { + headers.put(Consts.TOKEN_KEY, t); + } + if (!isLeader()) { + String grpcAddress = null; + try { + grpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(true); + } catch 
(Exception e) { + } + if (!StringUtils.isEmpty(grpcAddress)) { + headers.put(Consts.LEADER_KEY, grpcAddress); + } + } + super.sendHeaders(headers); + } + }; + return next.startCall(sc, headers); + }; + return authenticate(authority, token, tokenCall); + } catch (Exception e) { + call.close(Status.UNAUTHENTICATED.withDescription(e.getMessage()), headers); + return next.startCall(call, headers); + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/RedirectInterceptor.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/RedirectInterceptor.java new file mode 100644 index 0000000000..001dd8b231 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/interceptor/RedirectInterceptor.java @@ -0,0 +1,68 @@ +package org.apache.hugegraph.pd.service.interceptor; + +import static java.nio.charset.StandardCharsets.UTF_8; + +import java.util.Base64; + +import org.apache.hugegraph.pd.common.Consts; + +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ForwardingClientCall.SimpleForwardingClientCall; +import io.grpc.ForwardingClientCallListener.SimpleForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; + +/** + * @author zhangyingjie + * @date 2024/1/17 + **/ +public class RedirectInterceptor implements ClientInterceptor { + + private static String auth = "store:$2a$04$9ZGBULe2vc73DMj7r/iBKeQB1SagtUXPrDbMmNswRkTwlWQURE/Jy"; + private static String authority = new String(Base64.getEncoder().encode(auth.getBytes(UTF_8))); + + public RedirectInterceptor() { + } + + @Override + public ClientCall interceptCall( + MethodDescriptor method, + CallOptions callOptions, Channel next) { + + return new SimpleForwardingClientCall<>( + next.newCall(method, callOptions)) { + + @Override + public void sendMessage(ReqT message) { + super.sendMessage(message); + } + + 
@Override + public void start(ClientCall.Listener listener, Metadata headers) { + headers.put(Consts.CREDENTIAL_KEY, authority); + SimpleForwardingClientCallListener callListener = + new SimpleForwardingClientCallListener<>(listener) { + @Override + public void onMessage(RespT message) { + super.onMessage(message); + } + + @Override + public void onHeaders(Metadata headers) { + super.onHeaders(headers); + } + + @Override + public void onClose(Status status, Metadata trailers) { + super.onClose(status, trailers); + } + }; + super.start(callListener, headers); + } + }; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java new file mode 100644 index 0000000000..cdccef2750 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionScriptFactory.java @@ -0,0 +1,41 @@ +package org.apache.hugegraph.pd.upgrade; + +import org.apache.hugegraph.pd.upgrade.scripts.PartitionMetaUpgrade; +import org.apache.hugegraph.pd.upgrade.scripts.TaskCleanUpgrade; + +import java.util.LinkedList; +import java.util.List; + +public class VersionScriptFactory { + private static volatile VersionScriptFactory factory; + + private static List scripts = new LinkedList<>(); + + static { + registerScript(new PartitionMetaUpgrade()); + registerScript(new TaskCleanUpgrade()); + } + + private VersionScriptFactory(){ + + } + + public static VersionScriptFactory getInstance(){ + if (factory == null) { + synchronized (VersionScriptFactory.class) { + if (factory == null) { + factory = new VersionScriptFactory(); + } + } + } + return factory; + } + + public static void registerScript(VersionUpgradeScript script) { + scripts.add(script); + } + + public List getScripts() { + return scripts; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java new file mode 100644 index 0000000000..1b85f01597 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/VersionUpgradeScript.java @@ -0,0 +1,39 @@ +package org.apache.hugegraph.pd.upgrade; + +import org.apache.hugegraph.pd.config.PDConfig; + +public interface VersionUpgradeScript { + + String UNLIMITED_VERSION = "UNLIMITED_VERSION"; + + /** + * the highest version that need to run upgrade instruction + * @return high version + */ + String getHighVersion(); + + /** + * the lowest version that need to run upgrade instruction + * @return lower version + */ + String getLowVersion(); + + /** + * pd中没有data version的时候,是否执行. 一般是对应3。6。2之前的版本 + * + * @return run when pd has no data version + */ + boolean isRunWithoutDataVersion(); + + /** + * the scrip just run once, ignore versions + * @return run once script + */ + boolean isRunOnce(); + + /** + * run the upgrade instruction + */ + void runInstruction(PDConfig config); + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java new file mode 100644 index 0000000000..d4aa1c0003 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java @@ -0,0 +1,134 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.upgrade.scripts; +======== +package org.apache.hugegraph.pd.upgrade.scripts; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java + +import java.util.HashSet; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.Useless; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; + +import lombok.extern.slf4j.Slf4j; + +@Useless("upgrade related") +@Slf4j +public class PartitionMetaUpgrade implements VersionUpgradeScript { + + @Override + public String getHighVersion() { + return "3.6.2"; + } + + @Override + public String getLowVersion() { + return UNLIMITED_VERSION; + } + + @Override + public void runInstruction(PDConfig config) { + + log.info("run PartitionMetaUpgrade script"); + var dbStore = new MetadataRocksDBStore(config); + + try { + var partSet = new HashSet(); + for (var graph : dbStore.scanPrefix(Metapb.Graph.parser(), + 
MetadataKeyHelper.getGraphPrefix())) { + var graphPrefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName()); + for (var partition : dbStore.scanPrefix(Metapb.PartitionV36.parser(), + graphPrefix)) { + var newPartition = trans(partition); + var partId = partition.getId(); + log.info("trans partition structure: from {} to {}", partition, newPartition); + // backup + var key36 = MetadataKeyHelper.getPartitionV36Key(graph.getGraphName(), partId); + dbStore.put(key36, partition.toByteArray()); + // write new structure + var key = MetadataKeyHelper.getPartitionKey(graph.getGraphName(), partId); + dbStore.put(key, newPartition.toByteArray()); + + // construct shard group + if (!partSet.contains(partId)) { + var shardGroupKey = MetadataKeyHelper.getShardGroupKey(partId); + var shardGroup = dbStore.getOne(Metapb.ShardGroup.parser(), shardGroupKey); + if (shardGroup == null) { + var shardList = partition.getShardsList(); + if (shardList.size() > 0) { + shardGroup = Metapb.ShardGroup.newBuilder() + .setId(partId) + .setVersion(partition.getVersion()) + .setConfVer(0) + .setState(partition.getState()) + .addAllShards(shardList) + .build(); + dbStore.put(shardGroupKey, shardGroup.toByteArray()); + log.info("extract shard group from partition, {}", shardGroup); + } else { + throw new PDException(1000, + "trans partition failed, no shard list"); + } + } + partSet.add(partId); + } + + } + } + } catch (Exception e) { + log.error("script: {}, run error : {}", getClass().getName(), e.getMessage()); + } + } + + @Override + public boolean isRunOnce() { + return true; + } + + @Override + public boolean isRunWithoutDataVersion() { + return true; + } + + private Metapb.Partition trans(Metapb.PartitionV36 partition) { + + return Metapb.Partition.newBuilder() + .setId(partition.getId()) + .setGraphName(partition.getGraphName()) + .setStartKey(partition.getStartKey()) + .setEndKey(partition.getEndKey()) + .setVersion(partition.getVersion()) + .setState(partition.getState()) + 
.setMessage(partition.getMessage()) + .build(); + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java new file mode 100644 index 0000000000..9872326a4e --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/TaskCleanUpgrade.java @@ -0,0 +1,47 @@ +package org.apache.hugegraph.pd.upgrade.scripts; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class TaskCleanUpgrade implements VersionUpgradeScript { + @Override + public String getHighVersion() { + return UNLIMITED_VERSION; + } + + @Override + public String getLowVersion() { + return UNLIMITED_VERSION; + } + + @Override + public boolean isRunWithoutDataVersion() { + return true; + } + + @Override + public boolean isRunOnce() { + return true; + } + + @Override + public void runInstruction(PDConfig config) { + log.info("run TaskCleanUpgrade script"); + var dbStore = new MetadataRocksDBStore(config); + + try { + byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); + log.info("delete split task:{}", dbStore.removeByPrefix(key)); + byte[] key2 = MetadataKeyHelper.getAllMoveTaskPrefix(); + log.info("delete move task:{}", dbStore.removeByPrefix(key2)); + } catch (PDException e) { + throw new RuntimeException(e); + } + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java new file mode 100644 index 0000000000..162944903f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/DateUtil.java @@ -0,0 +1,59 @@ +package 
org.apache.hugegraph.pd.util; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.commons.lang3.time.DateUtils; + +import java.text.ParseException; +import java.util.Date; + +/** + * @author zhangyingjie + * @date 2022/3/23 + **/ +public class DateUtil { + private static String DATE = "yyyy-MM-dd"; + private static String DATETIME = "yyyy-MM-dd HH:mm:ss"; + private static String DATETIME_MM = "yyyy-MM-dd HH:mm"; + private static String DATETIME_SSS = "yyyy-MM-dd HH:mm:ss.SSS"; + private static String TIME = "HH:mm"; + private static String TIME_SS = "HH:mm:ss"; + private static String SYS_DATE = "yyyy/MM/dd"; + private static String SYS_DATETIME = "yyyy/MM/dd HH:mm:ss"; + private static String SYS_DATETIME_MM = "yyyy/MM/dd HH:mm"; + private static String SYS_DATETIME_SSS = "yyyy/MM/dd HH:mm:ss.SSS"; + private static String NONE_DATE = "yyyyMMdd"; + private static String NONE_DATETIME = "yyyyMMddHHmmss"; + private static String NONE_DATETIME_MM = "yyyyMMddHHmm"; + private static String NONE_DATETIME_SSS = "yyyyMMddHHmmssSSS"; + private static String[] PATTERNS =new String[]{ + DATE, + DATETIME, + DATETIME_MM, + DATETIME_SSS, + TIME, + TIME_SS, + SYS_DATE, + SYS_DATETIME, + SYS_DATETIME_MM, + SYS_DATETIME_SSS, + NONE_DATE, + NONE_DATETIME, + NONE_DATETIME_MM, + NONE_DATETIME_SSS + }; + + public static String[] getDefaultPattern(){ + return PATTERNS; + } + + public static Date getDate(String date) throws PDException { + try { + return DateUtils.parseDate(date,getDefaultPattern()); + } catch (ParseException e) { + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); + } + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java new file mode 100644 index 0000000000..90700dedc4 --- /dev/null +++ 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgExecutorUtil.java @@ -0,0 +1,164 @@ +package org.apache.hugegraph.pd.util; + +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Executor; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.hugegraph.pd.common.HgAssert; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/03/04 + */ +@Slf4j +public final class HgExecutorUtil { + private static final Map EXECUTOR_MAP = new ConcurrentHashMap<>(); + private static final Executor COMMON_EXECUTOR + = new ThreadPoolExecutor(0, Integer.MAX_VALUE, + 60L, TimeUnit.SECONDS, + new SynchronousQueue(), + newThreadFactory("pd-common")); + + public static void execute(Runnable command) { + if (command == null) { + return; + } + COMMON_EXECUTOR.execute(command); + } + + public static ThreadFactory newThreadFactory(String namePrefix, int priority) { + HgAssert.isArgumentNotNull(namePrefix, "namePrefix"); + return new HgThreadFactory(namePrefix, priority); + } + + public static ThreadFactory newThreadFactory(String namePrefix) { + HgAssert.isArgumentNotNull(namePrefix, "namePrefix"); + return new HgDefaultThreadFactory(namePrefix); + } + + public static ThreadPoolExecutor getThreadPoolExecutor(String name) { + if (name == null) { + return null; + } + return EXECUTOR_MAP.get(name); + } + + /** + * @see HgExecutorUtil:createExecutor(String , int , int , int ) + */ + @Deprecated + public static Executor createExecutor(String name, int coreThreads, int maxThreads) { +/* ThreadPoolExecutor res = + new ThreadPoolExecutor(coreThreads, maxThreads, + 60L, TimeUnit.SECONDS, + new LinkedBlockingQueue(), + 
newThreadFactory(name)); + if (threadPoolMap.containsKey(name)) { + threadPoolMap.put(name + "-1", res); + } else { + threadPoolMap.put(name, res); + }*/ + return createExecutor(name, coreThreads, maxThreads, Integer.MAX_VALUE); + } + + public static ThreadPoolExecutor createExecutor(String name, int coreThreads, int maxThreads, + int queueSize) { + ThreadPoolExecutor res = EXECUTOR_MAP.get(name); + + if (res != null) { + return res; + } + + synchronized (EXECUTOR_MAP) { + res = EXECUTOR_MAP.get(name); + if (res != null) { + return res; + } + + BlockingQueue queue = null; + + if (queueSize <= 0) { + queue = new SynchronousQueue(); + } else { + queue = new LinkedBlockingQueue<>(queueSize); + } + + res = new ThreadPoolExecutor( + coreThreads, + maxThreads, + 60L, TimeUnit.SECONDS, + queue, + newThreadFactory(name) + ); + EXECUTOR_MAP.put(name, res); + } + + return res; + } + + /** + * The default thread factory + */ + static class HgThreadFactory implements ThreadFactory { + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + private final int priority; + + HgThreadFactory(String namePrefix, int priority) { + this.namePrefix = namePrefix; + this.priority = priority; + SecurityManager s = System.getSecurityManager(); + } + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(null, r, + namePrefix + "-" + threadNumber.getAndIncrement(), + 0); + if (t.isDaemon()) { + t.setDaemon(false); + } + if (t.getPriority() != priority) { + t.setPriority(priority); + } + return t; + } + } + + /** + * The default thread factory, which added threadNamePrefix in construction method. 
+ */ + static class HgDefaultThreadFactory implements ThreadFactory { + private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1); + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + + HgDefaultThreadFactory(String threadNamePrefix) { + SecurityManager s = System.getSecurityManager(); + this.namePrefix = threadNamePrefix + "-" + + POOL_NUMBER.getAndIncrement() + + "-thread-"; + } + + @Override + public Thread newThread(Runnable r) { + Thread t = new Thread(null, r, + namePrefix + threadNumber.getAndIncrement(), + 0); + if (t.isDaemon()) { + t.setDaemon(false); + } + if (t.getPriority() != Thread.NORM_PRIORITY) { + t.setPriority(Thread.NORM_PRIORITY); + } + return t; + } + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java new file mode 100644 index 0000000000..d0cb0e0e25 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/HgMapCache.java @@ -0,0 +1,82 @@ +package org.apache.hugegraph.pd.util; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; + +/** + * @param + * @param + * @author lynn.bond@hotmail.com on 2022/3/10 + */ +public class HgMapCache { + private Map cache = new ConcurrentHashMap(); + private Supplier expiry; + + public static HgMapCache expiredOf(long interval){ + return new HgMapCache(new CycleIntervalPolicy(interval)); + } + + private HgMapCache(Supplier expiredPolicy) { + this.expiry = expiredPolicy; + } + + private boolean isExpired() { + if (expiry != null && expiry.get()) { + cache.clear(); + return true; + } + return false; + } + + public void put(K key, V value) { + if (key == null || value == null) return; + this.cache.put(key, value); + } + + + public V get(K key) { + if (isExpired()) return null; + return this.cache.get(key); + } + + public void removeAll() { + this.cache.clear(); + 
} + + public boolean remove(K key) { + if (key != null) { + this.cache.remove(key); + return true; + } + return false; + } + + public Map getAll() { + return this.cache; + } + + private static class CycleIntervalPolicy implements Supplier{ + private long expireTime=0; + private long interval=0; + + public CycleIntervalPolicy(long interval){ + this.interval=interval; + init(); + } + private void init(){ + expireTime=System.currentTimeMillis()+interval; + } + + @Override + public Boolean get() { + if(System.currentTimeMillis()>expireTime){ + init(); + return true; + } + return false; + } + + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java new file mode 100644 index 0000000000..569702e6c4 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/IdUtil.java @@ -0,0 +1,32 @@ +package org.apache.hugegraph.pd.util; + +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/8 + */ +@Slf4j +public final class IdUtil { + private final static byte[] lock = new byte[0]; + + public static String createMillisStr(){ + return String.valueOf(createMillisId()); + } + + /** + * Create millisecond style ID; + * @return + */ + public static Long createMillisId() { + synchronized (lock) { + try { + Thread.sleep(1); + } catch (InterruptedException e) { + log.error("Failed to sleep", e); + } + + return System.currentTimeMillis(); + } + + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/TokenUtil.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/TokenUtil.java new file mode 100644 index 0000000000..73e2f9d2e2 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/TokenUtil.java @@ -0,0 +1,81 @@ +package org.apache.hugegraph.pd.util; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import 
java.util.Map; + +import org.apache.hugegraph.auth.AuthConstant; +import org.apache.hugegraph.auth.TokenGenerator; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.config.Server; +import org.apache.hugegraph.util.StringEncoding; +import com.google.common.base.Charsets; +import com.google.common.collect.ImmutableMap; + +/** + * @author zhangyingjie + * @date 2023/4/25 + **/ +public class TokenUtil { + + public static final long AUTH_TOKEN_EXPIRE = 3600 * 24L * 1000; + + private TokenGenerator generator; + private Map servers = new HashMap<>(); + + public TokenUtil(String secretKey, List servers) { + this.generator = new TokenGenerator(secretKey); + for (Server server : servers) { + this.servers.put(server.getServer(), server); + } + } + + public static void main(String[] args) { + + List servers1 = PDConfig.getDefaultServers(); + TokenUtil util = new TokenUtil("FXQXbJtbCLxODc6tGci732pkH1cyf8Qg", servers1); + // String uniqueToken = util.getStoreToken(); + String x = StringEncoding.hashPassword("FqU8BOvTpteT"); + // String x = "$2a$04$i10KooNg6wLvIPVDh909n.RBYlZ/4pJo978nFK86nrqQiGIKV4UGS"; + System.out.println(x); + // System.out.println(StringEncoding.checkPassword("qRyYhxVAWDb5", x)); + // $2a$04$9ZGBULe2vc73DMj7r/iBKeQB1SagtUXPrDbMmNswRkTwlWQURE/Jy "E3UnnQa605go" + // $2a$04$i10KooNg6wLvIPVDh909n.RBYlZ/4pJo978nFK86nrqQiGIKV4UGS "qRyYhxVAWDb5" + // $2a$04$pSGkohaywGgFrJLr6VOPm.IK2WtOjlNLcZN8gct5uIKEDO1I61DGa "iMjHnUl5Pprx" + // eyJhbGciOiJIUzI1NiJ9 + // .eyJ1c2VyX25hbWUiOiJzdG9yZSIsInVzZXJfaWQiOiJhZWEwOTM1Ni0xZWJhLTQ1NjktODk0ZS1kYWIzZTRhYTYyM2MiLCJleHAiOjE2ODI1MDQ0MTd9.lDqbt3vZkE3X2IIK9A404BBlCFHBaEVsIycH0AIXKsw + String token = util.getToken(servers1.get(0).getServer()); + System.out.println(token); + } + + public String createToken(String userName) { + Map payload = ImmutableMap.of(AuthConstant.TOKEN_USER_NAME, userName); + byte[] bytes = generator.create(payload, AUTH_TOKEN_EXPIRE).getBytes(StandardCharsets.UTF_8); + byte[] encode 
= Base64.getEncoder().encode(bytes); + return new String(encode, Charsets.UTF_8); + } + + public String getToken(String appName) { + Server info = servers.get(appName); + if (info != null) { + return createToken(appName); + } + return null; + } + + public boolean verify(String token, String[] info) { + byte[] decode = Base64.getDecoder().decode(token); + String d = new String(decode, StandardCharsets.UTF_8); + if (d.equals(info[1])) { + return true; + } + return false; + } + + public Server getInfo(String appName) { + Server server = servers.get(appName); + return server; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java new file mode 100644 index 0000000000..dea0fcefdc --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/GRpcServerConfig.java @@ -0,0 +1,29 @@ +package org.apache.hugegraph.pd.util.grpc; + +import org.lognet.springboot.grpc.GRpcServerBuilderConfigurer; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.consts.PoolNames; +import org.apache.hugegraph.pd.util.HgExecutorUtil; + +import io.grpc.ServerBuilder; + +@Component +public class GRpcServerConfig extends GRpcServerBuilderConfigurer { + + public static final int MAX_INBOUND_MESSAGE_SIZE = 1024 * 1024 * 1024; + @Autowired + private PDConfig pdConfig; + + @Override + public void configure(ServerBuilder serverBuilder) { + PDConfig.ThreadPoolGrpc poolGrpc = pdConfig.getThreadPoolGrpc(); + serverBuilder.executor( + HgExecutorUtil.createExecutor(PoolNames.GRPC, poolGrpc.getCore(), poolGrpc.getMax(), + poolGrpc.getQueue())); + serverBuilder.maxInboundMessageSize(MAX_INBOUND_MESSAGE_SIZE); + } + +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java 
b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java new file mode 100644 index 0000000000..e1fc4a3b10 --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/util/grpc/StreamObserverUtil.java @@ -0,0 +1,32 @@ +package org.apache.hugegraph.pd.util.grpc; + +import io.grpc.Grpc; +import io.grpc.ServerCall; +import io.grpc.stub.StreamObserver; + +import java.lang.reflect.Field; + +public class StreamObserverUtil { + + static Object fieldLock = new Object(); + static Field callField; + + public static String getRemoteIP(StreamObserver observer) { + String ip = ""; + try { + if (callField == null) { + synchronized (fieldLock) { + callField = observer.getClass().getDeclaredField("call"); + callField.setAccessible(true); + } + } + ServerCall call = (ServerCall) callField.get(observer); + if (call != null) { + ip = call.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR).toString(); + } + } catch (Exception e) { + + } + return ip; + } +} diff --git a/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java new file mode 100644 index 0000000000..e21b54512f --- /dev/null +++ b/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -0,0 +1,329 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +package org.apache.hugegraph.pd.watch; + +import java.util.Arrays; +import java.util.LinkedList; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.BiPredicate; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.kv.WatchEvent; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchState; +import org.apache.hugegraph.pd.grpc.kv.WatchType; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.store.RaftKVStore; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +======== +import io.grpc.Status; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +import io.grpc.StatusRuntimeException; +import io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +/** + * Watch subscription and response processing classes +======== +/** watch订阅、响应处理类 + * @author zhangyingjie + * @date 2022/6/21 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + **/ +@Slf4j +public class KvWatchSubject { + + public static final String KEY_DELIMITER = "KW"; + public static final String PREFIX_DELIMITER = "PW"; + public static final String ALL_PREFIX = "W"; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + public static final long WATCH_TTL = 20000L; + private static final ConcurrentMap> clients = + new ConcurrentHashMap<>(); + private final KvService kvService; + BiPredicate equal = String::equals; + BiPredicate startWith = String::startsWith; +======== + public static final long WATCH_TTL = 1800000L; + private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + + /** + * The following three sets of keys will be used: + * clients -> W@KW@key@clientId + * rocksdb key1 ->W@KW@key@clientId + * rocksdb key2 ->W@clientId@KW@key@clientId + **/ + public KvWatchSubject(PDConfig pdConfig) { + this.kvService = new KvService(pdConfig); + } + + public String getWatchKey(String key, String watchDelimiter) { + return KvService.getKeyWithoutPrefix(ALL_PREFIX, watchDelimiter, key); + } + + private void addWatchKey(String key, String delimiter, long clientId) throws PDException { + String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + kvService.put(watchKey, "", WATCH_TTL); + String clientFirstKey = + KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, key, clientId); + kvService.put(clientFirstKey, "", WATCH_TTL); + } + + private void removeWatchKey(String key, String delimiter, long clientId) throws PDException { + String watchKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + kvService.delete(watchKey); + String clientFirstKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, clientId, delimiter, 
key); + kvService.deleteWithPrefix(clientFirstKey); + } + + /** + * Increase observers + * + * @param key The key of the observation + * @param clientId Client identity + * @param observer + * @param delimiter Observe the type identifier, listen to the prefix or listen to the key + * can be distinguished by this parameter + * @throws PDException + */ + public void addObserver(String key, long clientId, StreamObserver observer, + String delimiter) throws PDException { + String keyWithoutPrefix = + KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + clients.putIfAbsent(keyWithoutPrefix, observer); + addWatchKey(key, delimiter, clientId); + } + + public void removeObserver(String key, long clientId, String delimiter) throws PDException { + removeWatchKey(key, delimiter, clientId); + String keyWithoutPrefix = + KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); + clients.remove(keyWithoutPrefix); + } + + /** + * The notification observer method, which is used by both key and prefix, is different from + * predicate + * + * @param key + * @param watchType Observation types, generally additions and deletions + * @param predicate Determine whether it is equal or pre-matched, and use it to adapt to the + * key or prefix observation + * @param kvs + * @throws PDException + */ + public void notifyObserver(String key, WatchType watchType, + BiPredicate predicate, + WatchKv... kvs) throws PDException { + boolean isEqual = predicate.equals(equal); + String watchDelimiter = isEqual ? KEY_DELIMITER : PREFIX_DELIMITER; + String watchKeyPrefix = isEqual ? 
key : ""; + String storeKey = getWatchKey(watchKeyPrefix, watchDelimiter); + Map map = kvService.scanWithPrefix(storeKey); + String delimiter = String.valueOf(KvService.KV_DELIMITER); + WatchResponse watchResponse; + for (String keyAndClient : map.keySet()) { + String[] values = keyAndClient.split(delimiter); + assert values.length == 4; + String watchKey = values[2]; + String c = values[3]; + long clientId = Long.parseLong(c); + LinkedList watchEvents = new LinkedList<>(); + for (WatchKv kv : kvs) { + String kvKey = kv.getKey(); + boolean match = predicate.test(kvKey, watchKey); + if (!match) { + continue; + } + WatchKv watchKv = + WatchKv.newBuilder().setKey(kvKey).setValue(kv.getValue()).build(); + WatchEvent event = + WatchEvent.newBuilder().setCurrent(watchKv).setType(watchType).build(); + watchEvents.add(event); + } + StreamObserver observer = clients.get(keyAndClient); + watchResponse = + WatchResponse.newBuilder().setState(WatchState.Started).setClientId(clientId) + .addAllEvents(watchEvents).build(); + + try { + if (observer != null) { + synchronized (observer) { + // log.info("notifyObserver for clientId:{}", clientId); + observer.onNext(watchResponse); + } + } else { + log.info("cannot find StreamObserver for clientId:{}", clientId); + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + } catch (StatusRuntimeException ignored) { +======== + } catch (StatusRuntimeException s) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + + } catch (Exception e) { + log.warn("notifyObserver with error:{}", clientId, e); + } + } + } + + public void notifyAllObserver(String key, WatchType watchType, WatchKv[] kvs) throws + PDException { + notifyObserver(key, watchType, equal, kvs); + notifyObserver(key, watchType, startWith, kvs); + } + + /** + * Renew the client + * 1. Send an alive message to the client with a retry + * 2. 
If there is a response, the two sets of keys saved before will be reactivated + * 3. If it fails multiple times, delete the data of memory and rocksdb + */ + public void keepClientAlive() { + WatchResponse testAlive = WatchResponse.newBuilder().setState(WatchState.Alive).build(); + Set>> entries = clients.entrySet(); + Map.Entry>[] array = + entries.toArray(new Map.Entry[0]); + Arrays.stream(array).parallel().forEach(entry -> { + StreamObserver value = entry.getValue(); + String key = entry.getKey(); + String delimiter = KvService.getDelimiter(); + String client = key.split(delimiter)[3]; + String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client); + if (value == null) { + removeClient(null, key, clientKey); + } + boolean done = false; + String removes = client + KvService.KV_DELIMITER; + for (int i = 0; i < 3; i++) { + try { + synchronized (value) { + value.onNext(testAlive); + } + Map clientKeys = kvService.scanWithPrefix(clientKey); + Set> set = clientKeys.entrySet(); + for (Map.Entry keyEntry : set) { + String entryKey = keyEntry.getKey(); + String aliveKey = entryKey.replaceFirst(removes, ""); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + boolean keepAliveKey = kvService.keepAlive(aliveKey); + boolean keepAliveEntry = kvService.keepAlive(entryKey); + // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey, + // keepAliveKey, + // entryKey, + // keepAliveEntry); +======== + kvService.keepAlive(aliveKey); + kvService.keepAlive(entryKey); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + done = true; + } + break; + } catch (Exception e) { + if (e instanceof StatusRuntimeException && + ((StatusRuntimeException) e).getStatus().getCode().equals(Status.Code.CANCELLED)) { + break; + } + try { + Thread.sleep(100); + } catch (InterruptedException ex) { + + } + } + } + if (!done) { + log.info("remove client {} for no 
data", client); + removeClient(value, key, clientKey); + } + }); + } + + private void removeClient(StreamObserver value, String key, String clientKey) { + try { + log.info("remove null observer,client:", clientKey); + if (RaftEngine.getInstance().isLeader()) { + kvService.deleteWithPrefix(clientKey); + } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + // todo: delete records via client +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java + var store = kvService.getMeta().getStore(); + if (store instanceof RaftKVStore) { + ((RaftKVStore) store).doRemoveByPrefix(kvService.getStoreKey(clientKey)); + } + } + + if (value != null) { + synchronized (value) { + try{ + value.onCompleted(); + } catch (Exception e) { + + } + } + } + clients.remove(key); + } catch (PDException e) { + log.error("remove client with error:", e); + } + } + + /** + * Notify the client that the leader has switched and reconnect + */ + public void notifyClientChangeLeader() { + WatchResponse response = + WatchResponse.newBuilder().setState(WatchState.Leader_Changed).build(); + for (Map.Entry> entry : clients.entrySet()) { + StreamObserver value = entry.getValue(); + String key = entry.getKey(); + String client = key.split(KvService.getDelimiter())[3]; + String clientKey = KvService.getKeyWithoutPrefix(ALL_PREFIX, client); + if (value == null) { + removeClient(null, key, clientKey); + } + for (int i = 0; i < 3; i++) { + try { + synchronized (value) { + value.onNext(response); + } + removeClient(value, key, clientKey); + break; + } catch (Exception e) { + try { + Thread.sleep(100); + } catch (InterruptedException ignored) { + } + } + } + } + } +} diff --git a/hg-pd-service/src/test/java/live/PDServer0.java b/hg-pd-service/src/test/java/live/PDServer0.java new file mode 100644 index 0000000000..28fa92d0af --- /dev/null +++ 
b/hg-pd-service/src/test/java/live/PDServer0.java @@ -0,0 +1,24 @@ +package live; + +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.apache.hugegraph.pd.boot.ShutdownHook; + +import org.apache.commons.io.FileUtils; +import org.springframework.boot.SpringApplication; + +/** + * for 1 store node and 1 pd + * @author zhangyingjie + * @date 2022/1/9 + **/ +public class PDServer0 { + + static String SERVER_NAME = "server0"; + + public static void main(String[] args) { + Runtime.getRuntime().addShutdownHook(new ShutdownHook(Thread.currentThread())); + SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + System.out.println(SERVER_NAME + " started."); + } + +} diff --git a/hg-pd-service/src/test/java/live/PDServer1.java b/hg-pd-service/src/test/java/live/PDServer1.java new file mode 100644 index 0000000000..bc5b05fdbb --- /dev/null +++ b/hg-pd-service/src/test/java/live/PDServer1.java @@ -0,0 +1,22 @@ +package live; + +import org.springframework.boot.SpringApplication; + +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.apache.hugegraph.pd.boot.ShutdownHook; + +/** for 3 store nodes and 3 pds + * @author zhangyingjie + * @date 2022/1/9 + **/ +public class PDServer1 { + + static String SERVER_NAME = "server1"; + + public static void main(String[] args) { + Runtime.getRuntime().addShutdownHook(new ShutdownHook(Thread.currentThread())); + SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + System.out.println(SERVER_NAME + " started."); + } + +} diff --git a/hg-pd-service/src/test/java/live/PDServer13.java b/hg-pd-service/src/test/java/live/PDServer13.java new file mode 100644 index 0000000000..a2dc178618 --- /dev/null +++ b/hg-pd-service/src/test/java/live/PDServer13.java @@ -0,0 +1,20 @@ +package live; + +import org.springframework.boot.SpringApplication; + +import org.apache.hugegraph.pd.boot.HugePDServer; +import 
org.apache.hugegraph.pd.boot.ShutdownHook; + +/** + * for one pd and three nodes + */ +public class PDServer13 { + + static String SERVER_NAME = "server13"; + + public static void main(String[] args) { + Runtime.getRuntime().addShutdownHook(new ShutdownHook(Thread.currentThread())); + SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + System.out.println(SERVER_NAME + " started."); + } +} diff --git a/hg-pd-service/src/test/java/live/PDServer2.java b/hg-pd-service/src/test/java/live/PDServer2.java new file mode 100644 index 0000000000..df83fd092d --- /dev/null +++ b/hg-pd-service/src/test/java/live/PDServer2.java @@ -0,0 +1,22 @@ +package live; + +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.apache.hugegraph.pd.boot.ShutdownHook; + +import org.apache.commons.io.FileUtils; +import org.springframework.boot.SpringApplication; + +/** for 3 store nodes and 3 pds + * @author zhangyingjie + * @date 2022/1/9 + **/ +public class PDServer2 { + + static String SERVER_NAME = "server2"; + + public static void main(String[] args) { + Runtime.getRuntime().addShutdownHook(new ShutdownHook(Thread.currentThread())); + SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + System.out.println(SERVER_NAME + " started."); + } +} diff --git a/hg-pd-service/src/test/java/live/PDServer3.java b/hg-pd-service/src/test/java/live/PDServer3.java new file mode 100644 index 0000000000..98eeae89c4 --- /dev/null +++ b/hg-pd-service/src/test/java/live/PDServer3.java @@ -0,0 +1,23 @@ +package live; + +import org.apache.hugegraph.pd.boot.HugePDServer; +import org.apache.hugegraph.pd.boot.ShutdownHook; + +import org.apache.commons.io.FileUtils; +import org.springframework.boot.SpringApplication; + +/** for 3 store nodes and 3 pds + * @author zhangyingjie + * @date 2022/1/9 + **/ +public class PDServer3 { + + static String SERVER_NAME = "server3"; + + public static void main(String[] 
args) { + Runtime.getRuntime().addShutdownHook(new ShutdownHook(Thread.currentThread())); + SpringApplication.run(HugePDServer.class, String.format("--spring.profiles.active=%s", SERVER_NAME)); + System.out.println(SERVER_NAME + " started."); + } + +} diff --git a/hg-pd-service/src/test/resources/application-server1.yml b/hg-pd-service/src/test/resources/application-server1.yml new file mode 100644 index 0000000000..12545da98a --- /dev/null +++ b/hg-pd-service/src/test/resources/application-server1.yml @@ -0,0 +1,67 @@ +spring: + application: + name: hugegraph-pd + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +grpc: + port: 8686 + netty-server: + max-inbound-message-size: 100MB + +server: + port : 8620 + +pd: + + patrol-interval: 3000000 + data-path: tmp/8686 + # 最少节点数,少于该数字,集群停止入库 + initial-store-count: 1 + # 初始store列表,在列表内的store自动激活 + initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + #initial-store-list: 127.0.0.1:8501 + # servers: + # - server: store + # token: $2a$04$9ZGBULe2vc73DMj7r/iBKeQB1SagtUXPrDbMmNswRkTwlWQURE/Jy + # pwd: E3UnnQa605go + # - server: hg + # token: $2a$04$i10KooNg6wLvIPVDh909n.RBYlZ/4pJo978nFK86nrqQiGIKV4UGS + # pwd: qRyYhxVAWDb5 + # - server: hubble + # token: $2a$04$pSGkohaywGgFrJLr6VOPm.IK2WtOjlNLcZN8gct5uIKEDO1I61DGa + # pwd: iMjHnUl5Pprx + # - server: vermeer + # token: $2a$04$N89qHe0v5jqNJKhQZHnTdOFSGmiNoiA2B2fdWpV2BwrtJK72dXYD. 
+ # pwd: FqU8BOvTpteT +raft: + address: 127.0.0.1:8610 + # raft集群 + peers-list: 127.0.0.1:8610,127.0.0.1:8611,127.0.0.1:8612 + # raft rpc读写超时时间,单位毫秒 + rpc-timeout: 10000 + # 快照生成时间间隔,单位秒 + snapshotInterval: 30000 + metrics: true +store: + # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒 + keepAlive-timeout: 300 + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 180000 +partition: + # 默认每个分区副本数 + default-shard-count: 3 + # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 6 + +discovery: + #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + heartbeat-try-count: 3 \ No newline at end of file diff --git a/hg-pd-service/src/test/resources/application-server13.yml b/hg-pd-service/src/test/resources/application-server13.yml new file mode 100644 index 0000000000..f31c28d8c3 --- /dev/null +++ b/hg-pd-service/src/test/resources/application-server13.yml @@ -0,0 +1,54 @@ +spring: + application: + name: hugegraph-pd + +management: + metrics: + export: + prometheus: + enabled: true + endpoints: + web: + exposure: + include: "*" + +grpc: + port: 8686 + netty-server: + max-inbound-message-size: 100MB + +server: + port : 8620 + +pd: + + patrol-interval: 3000000 + data-path: tmp/8686 + # 最少节点数,少于该数字,集群停止入库 + initial-store-count: 2 + # 初始store列表,在列表内的store自动激活 + initial-store-list: 127.0.0.1:8501,127.0.0.1:8502,127.0.0.1:8503 + #initial-store-list: 127.0.0.1:8501 +raft: + address: 127.0.0.1:8610 + # raft集群 + peers-list: 127.0.0.1:8610 + # raft rpc读写超时时间,单位毫秒 + rpc-timeout: 10000 + # 快照生成时间间隔,单位秒 + snapshotInterval: 30000 + metrics: true +store: + # store心跳超时时间,超过该时间,认为store临时不可用,转移Leader到其他副本,单位秒 + keepAlive-timeout: 300 + # store下线时间。超过该时间,认为store永久不可用,分配副本到其他机器,单位秒 + max-down-time: 180000 +partition: + # 默认每个分区副本数 + default-shard-count: 3 + # 默认每机器最大副本数,初始分区数= store-max-shard-count * store-number / default-shard-count + store-max-shard-count: 6 + +discovery: + #客户端注册后,无心跳最长次数,超过后,之前的注册信息会被删除 + heartbeat-try-count: 3 \ No newline at 
end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/BaseTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/BaseTest.java new file mode 100644 index 0000000000..1c6b36c458 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/BaseTest.java @@ -0,0 +1,21 @@ +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.client.PDConfig; + +/** + * @author zhangyingjie + * @date 2023/10/11 + **/ +public class BaseTest { + + protected static String pdGrpcAddr = "10.108.17.32:8686"; + protected static String pdRestAddr = "http://10.108.17.32:8620"; + protected static String user = "store"; + protected static String pwd = "$2a$04$9ZGBULe2vc73DMj7r/iBKeQB1SagtUXPrDbMmNswRkTwlWQURE/Jy"; + protected static String key = "Authorization"; + protected static String value = "Basic c3RvcmU6YWRtaW4="; + + protected PDConfig getPdConfig() { + return PDConfig.of(pdGrpcAddr).setAuthority(user, pwd); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/BaseCliToolsTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/BaseCliToolsTest.java new file mode 100644 index 0000000000..6ab48eee22 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/BaseCliToolsTest.java @@ -0,0 +1,19 @@ +package org.apache.hugegraph.pd.cli; + +import org.junit.After; +import org.junit.BeforeClass; + +import org.apache.hugegraph.pd.BaseTest; + + +public class BaseCliToolsTest extends BaseTest { + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/CliToolsSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/CliToolsSuiteTest.java new file mode 100644 index 0000000000..b53b1b8e1b --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/CliToolsSuiteTest.java @@ -0,0 +1,17 @@ +package org.apache.hugegraph.pd.cli; + +import 
lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + MainTest.class +}) + +@Slf4j +public class CliToolsSuiteTest { + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/MainTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/MainTest.java new file mode 100644 index 0000000000..a1fcba70d3 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/cli/MainTest.java @@ -0,0 +1,40 @@ +package org.apache.hugegraph.pd.cli; + +import java.util.Arrays; +import java.util.List; + +import org.junit.Test; + +import lombok.extern.slf4j.Slf4j; + +@Slf4j +public class MainTest extends BaseCliToolsTest { + + + public static boolean test2sup(List arrays, int tail, int res) { + System.out.println(String.format("%d %d", tail, res)); + if (tail == 0) { + System.out.println(String.format("a = %d %d", tail, res)); + return false; + } else if (tail == 1) { + System.out.println(String.format("b = %d %d", arrays.get(0), res)); + return (arrays.get(0) == res); + } else if (tail == 2) { + System.out.println(String.format("c = %d %d %d", arrays.get(0), arrays.get(1), res)); + return (arrays.get(0) + arrays.get(1) == Math.abs(res)) || + (Math.abs(arrays.get(0) - arrays.get(1)) == Math.abs(res)); + } else { + return test2sup(arrays, tail - 1, res + arrays.get(tail - 1)) || + test2sup(arrays, tail - 1, res - arrays.get(tail - 1)); + } + } + + @Test + public void test2() { + Integer[] a = new Integer[]{1, 0, 3, 2}; + List aa = Arrays.asList(a); + System.out.printf(test2sup(aa, aa.size(), 0) ? 
"TRUE" : "FALSE"); + } + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java new file mode 100644 index 0000000000..7ee7863ceb --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/BaseClientTest.java @@ -0,0 +1,28 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.BaseTest; +import org.junit.After; +import org.junit.BeforeClass; +import org.junit.runner.RunWith; +import org.mockito.runners.MockitoJUnitRunner; + +@RunWith(MockitoJUnitRunner.class) +public class BaseClientTest extends BaseTest { + + public static PDClient pdClient; + public final String storeAddr = "localhost"; + public final String graphName = "default/hugegraph/g"; + public long storeId = 0; + + @BeforeClass + public static void beforeClass() { + PDConfig config = PDConfig.of(pdGrpcAddr).setAuthority(user, pwd); + config.setEnableCache(true); + pdClient = PDClient.create(config); + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/ChangingLeader.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/ChangingLeader.java new file mode 100644 index 0000000000..1e996b0614 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/ChangingLeader.java @@ -0,0 +1,37 @@ +package org.apache.hugegraph.pd.client; + +import com.alipay.sofa.jraft.CliService; +import com.alipay.sofa.jraft.RaftServiceFactory; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import com.alipay.sofa.jraft.entity.PeerId; +import com.alipay.sofa.jraft.option.CliOptions; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class ChangingLeader { + + private static CliService cliService = 
RaftServiceFactory.createAndInitCliService(new CliOptions()); + + public static void main(String[] args) { + var conf = new Configuration(); + conf.addPeer(PeerId.parsePeer("127.0.0.1:8610")); + conf.addPeer(PeerId.parsePeer("127.0.0.1:8611")); + conf.addPeer(PeerId.parsePeer("127.0.0.1:8612")); + CountDownLatch latch = new CountDownLatch(100); + + Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> { + Status status = cliService.transferLeader("pd_raft", conf, PeerId.ANY_PEER); + System.out.println("trigger change leader status: "+status); + latch.countDown(); + }, 1,3, TimeUnit.SECONDS); + + try { + latch.await(); + } catch (Exception e) { + System.out.println(e); + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java new file mode 100644 index 0000000000..bb35056b19 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientTest.java @@ -0,0 +1,65 @@ +package org.apache.hugegraph.pd.client; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.BaseTest; +import org.junit.Before; +import org.junit.Test; + +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.Query; + +public class DiscoveryClientTest extends BaseTest { + + private DiscoveryClientImpl client; + + @Before + public void setUp() { + client = getClient("appName", "localhost:8654", new HashMap()); + } + + @Test + public void testGetRegisterNode() { + // Setup + try { + Consumer result = client.getRegisterConsumer(); + final NodeInfo expectedResult = NodeInfo.newBuilder() + .setAppName("appName") + .build(); + + Thread.sleep(3000); + Query query = Query.newBuilder().setAppName("appName") + .setVersion("0.13.0").build(); + + // Run the test + client.getNodeInfos(query); + } catch (InterruptedException e) { + 
e.printStackTrace(); + } finally { + client.close(); + } + + } + + private DiscoveryClientImpl getClient(String appName, String address, + Map labels) { + DiscoveryClientImpl discoveryClient = null; + try { + discoveryClient = + DiscoveryClientImpl.newBuilder().setCenterAddress(pdGrpcAddr).setAddress(address) + .setAppName(appName) + .setDelay(2000) + .setVersion("0.13.0") + .setId("0").setLabels(labels) + .setPdConfig(getPdConfig()) + .build(); + discoveryClient.scheduleTask(); + } catch (Exception e) { + e.printStackTrace(); + } + + return discoveryClient; + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java new file mode 100644 index 0000000000..e66a6d4a35 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/KvClientTest.java @@ -0,0 +1,74 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.grpc.kv.KResponse; +import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchEvent; +import org.apache.hugegraph.pd.grpc.kv.WatchKv; +import org.apache.hugegraph.pd.grpc.kv.WatchResponse; +import org.apache.hugegraph.pd.grpc.kv.WatchState; +import org.apache.hugegraph.pd.grpc.kv.WatchType; +import org.apache.commons.lang3.StringUtils; +import static org.assertj.core.api.Assertions.assertThat; +import org.junit.Before; +import org.junit.Test; +import static org.mockito.Mockito.mock; + +import java.util.function.Consumer; + +public class KvClientTest extends BaseClientTest { + + private KvClient client; + + @Before + public void setUp() { + client = new KvClient<>(getPdConfig()); + } + + String key = "key"; + String value = "value"; + + @Test + public void testPutAndGet() throws Exception { + // Run the test + try { + client.put(key, value); + // Run the test + KResponse result = client.get(key); + + // Verify the results + 
assertThat(result.getValue()).isEqualTo(value); + client.delete(key); + result = client.get(key); + assertThat(StringUtils.isEmpty(result.getValue())); + client.deletePrefix(key); + client.put(key + "1", value); + client.put(key + "2", value); + ScanPrefixResponse response = client.scanPrefix(key); + assertThat(response.getKvsMap().size() == 2); + client.putTTL(key + "3", value, 1000); + client.keepTTLAlive(key + "3"); + final Consumer mockConsumer = mock(Consumer.class); + + // Run the test + client.listen(key + "3", mockConsumer); + client.listenPrefix(key + "4", mockConsumer); + WatchResponse r = WatchResponse.newBuilder().addEvents( + WatchEvent.newBuilder().setCurrent( + WatchKv.newBuilder().setKey(key).setValue("value") + .build()).setType(WatchType.Put).build()) + .setClientId(0L) + .setState(WatchState.Starting) + .build(); + client.getWatchList(r); + client.getWatchMap(r); + client.lock(key, 3000L); + client.isLocked(key); + client.unlock(key); + client.lock(key, 3000L); + client.keepAlive(key); + client.close(); + } catch (Exception e) { + + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java new file mode 100644 index 0000000000..f862196c37 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientSuiteTest.java @@ -0,0 +1,18 @@ +package org.apache.hugegraph.pd.client; + +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + PDClientTest.class, + KvClientTest.class, + DiscoveryClientTest.class +}) + +@Slf4j +public class PDClientSuiteTest { + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java new file mode 100644 index 0000000000..db85008f4f --- /dev/null +++ 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hugegraph.pd.client;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.apache.hugegraph.pd.client.listener.PDEventListener;
import org.apache.hugegraph.pd.common.PDException;
import org.apache.hugegraph.pd.grpc.MetaTask;
import org.apache.hugegraph.pd.grpc.Metapb;
import org.junit.Test;
import org.mockito.Mockito;

import lombok.extern.slf4j.Slf4j;

/**
 * Smoke tests for {@link PDClient}. Each case invokes one client API against
 * the PD instance configured by {@code BaseClientTest} and only verifies that
 * the call completes, or fails with the expected PD error code where one is
 * asserted. A reachable PD service is required for these tests to exercise
 * anything; without one most calls fall into the catch blocks.
 *
 * <p>NOTE(review): merge-conflict markers from the 3.6.5 -&gt; 4.x merge were
 * resolved here by keeping the ASF license header and the union of both
 * sides' imports.
 *
 * <p>TODO: exceptions should be asserted or rethrown rather than silenced;
 * most catch blocks below merely print the stack trace.
 */
@Slf4j
public class PDClientTest extends BaseClientTest {

    @Test
    public void testDbCompaction() {
        try {
            // Both the targeted (by table prefix) and the full-compaction overloads.
            pdClient.dbCompaction("");
            pdClient.dbCompaction();
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testRegisterStore() {
        Metapb.Store store = Metapb.Store.newBuilder().build();
        try {
            pdClient.registerStore(store);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testSetGraph() {
        Metapb.Graph graph = Metapb.Graph.newBuilder().setGraphName("test").build();
        try {
            pdClient.setGraph(graph);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetGraph() {
        try {
            pdClient.getGraph("test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetStore() {
        try {
            pdClient.getStore(0L);
        } catch (PDException e) {
            // Store id 0 is never registered; PD is expected to answer
            // "store not found" (error code 101).
            assert e.getErrorCode() == 101;
        }
    }

    @Test
    public void testUpdateStore() {
        Metapb.Store store = Metapb.Store.newBuilder().build();
        try {
            pdClient.updateStore(store);
        } catch (PDException e) {
            // Updating an unregistered store may legitimately fail; this
            // smoke test only checks the call path, so the error is ignored.
        }
    }

    @Test
    public void testGetActiveStores() {
        try {
            pdClient.getActiveStores("test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetAllStores() {
        try {
            pdClient.getAllStores("test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testStoreHeartbeat() {
        Metapb.StoreStats stats = Metapb.StoreStats.newBuilder().build();
        try {
            pdClient.storeHeartbeat(stats);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testKeyToCode() {
        // Pure client-side hashing; no PD round-trip, so no exception expected.
        pdClient.keyToCode("test", "test".getBytes(StandardCharsets.UTF_8));
    }

    @Test
    public void testScanPartitions() {
        try {
            pdClient.scanPartitions("test", "1".getBytes(StandardCharsets.UTF_8),
                                    "9".getBytes(StandardCharsets.UTF_8));
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetPartitionsByStore() {
        try {
            pdClient.getPartitionsByStore(0L);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testQueryPartitions() {
        try {
            pdClient.queryPartitions(0L, 0);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetPartitions() {
        try {
            pdClient.getPartitions(0L, "test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testUpdatePartitionLeader() {
        pdClient.updatePartitionLeader("aaa", 0, 0L);
    }

    @Test
    public void testInvalidPartitionCache() {
        pdClient.invalidPartitionCache();
    }

    @Test
    public void testInvalidStoreCache() {
        pdClient.invalidStoreCache(0L);
    }

    @Test
    public void testUpdatePartitionCache() {
        Metapb.Partition partition = Metapb.Partition.newBuilder().build();
        Metapb.Shard leader = Metapb.Shard.newBuilder().build();
        pdClient.updatePartitionCache(partition, leader);
    }

    @Test
    public void testGetIdByKey() {
        try {
            pdClient.getIdByKey("test", 1);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testResetIdByKey() {
        try {
            pdClient.resetIdByKey("test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetGetLeader() {
        try {
            pdClient.getLeader();
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetMembers() {
        try {
            pdClient.getMembers();
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetClusterStats() {
        try {
            pdClient.getClusterStats();
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testAddEventListener() {
        PDEventListener listener = Mockito.mock(PDEventListener.class);
        pdClient.addEventListener(listener);
    }

    @Test
    public void testGetWatchClient() {
        pdClient.getWatchClient();
    }

    @Test
    public void testGetPulseClient() {
        // pdClient.getPulseClient();
    }

    @Test
    public void testGetStoreStatus() {
        try {
            pdClient.getStoreStatus(true);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetPartition() {
        try {
            pdClient.getPartition("test", "test".getBytes(StandardCharsets.UTF_8));
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testSetGraphSpace() {
        try {
            pdClient.setGraphSpace("test", 1L);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testGetGraphSpace() {
        try {
            pdClient.getGraphSpace("test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testSetPDConfig() {
        try {
            pdClient.setPDConfig("", 0, 0L);
        } catch (PDException e) {
            // Invalid configuration is expected to be rejected with code 112.
            assert e.getErrorCode() == 112;
        }
        Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().build();

        try {
            pdClient.setPDConfig(pdConfig);
        } catch (PDException e) {
            assert e.getErrorCode() == 112;
        }
    }

    @Test
    public void testGetPDConfig() {
        try {
            pdClient.getPDConfig(0L);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testChangePeerList() {
        try {
            pdClient.changePeerList("");
        } catch (PDException e) {
            // An empty peer list is invalid; PD reports a generic failure (-1).
            assert e.getErrorCode() == -1;
        }
    }

    @Test
    public void testSplitData() {
        try {
            // Raise the shard-per-store limit first so the split is permitted.
            Metapb.PDConfig config = pdClient.getPDConfig();
            pdClient.setPDConfig(config.toBuilder()
                                       .setMaxShardsPerStore(12)
                                       .build());
            System.out.println(pdClient.getPDConfig());
            pdClient.splitData();
        } catch (PDException e) {
            log.error("testSplitData", e);
        }
    }

    @Test
    public void testBalancePartition() {
        try {
            pdClient.balancePartition();
        } catch (PDException e) {
            // Balancing with no registered stores fails; ignored in this smoke test.
        }
    }

    @Test
    public void testMovePartition() {
        // TODO(review): this case never calls movePartition; it currently
        // duplicates testBalancePartition. Unused ClusterOp/param locals from
        // the merge were removed.
        try {
            pdClient.balancePartition();
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testReportTask() {
        MetaTask.Task task = MetaTask.Task.newBuilder().build();
        try {
            pdClient.reportTask(task);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testBalanceLeaders() {
        try {
            pdClient.balanceLeaders();
        } catch (PDException e) {
            // Leader balancing without a healthy cluster yields code 1001.
            assert e.getErrorCode() == 1001;
        }
    }

    @Test
    public void testDelStore() {
        try {
            pdClient.delStore(0L);
        } catch (PDException e) {
            // Deleting a non-existent store may fail; ignored in this smoke test.
        }
    }

    @Test
    public void testUpdatePartition() {
        // Typed list replaces the raw 'List' from the merged patch.
        List<Metapb.Partition> partitions = new ArrayList<>();
        try {
            pdClient.updatePartition(partitions);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testDelPartition() {
        try {
            pdClient.delPartition("test", 0);
        } catch (PDException e) {
            e.printStackTrace();
        }
    }

    @Test
    public void testdelGraph() {
        try {
            pdClient.delGraph("test");
        } catch (PDException e) {
            e.printStackTrace();
        }
    }
}
b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDPulseTest.java new file mode 100644 index 0000000000..40848100b3 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDPulseTest.java @@ -0,0 +1,156 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.pulse.Pulse; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.pulse.PulseNotifier; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.apache.hugegraph.pd.test.HgPDTestUtil; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +public class PDPulseTest extends BaseClientTest { + + @BeforeClass + public static void beforeClass() { + PDConfig pdConfig = PDConfig.of("localhost:8686" + + ",localhost:8687,localhost:8688" + ) + .setAuthority(user, pwd); + pdConfig.setEnableCache(true); + pdClient = PDClient.create(pdConfig); + try { + pdClient.getLeader(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void listen() { + listenWithObserverId(0L); + } + + @Test + public void listenWith1() { + this.listenWithObserverId(1L); + } + + @Test + public void listenWith2() { + this.listenWithObserverId(2L); + } + + @Test + public void listenWith3() { + this.listenWithObserverId(3L); + } + + public void forceReconnect() { +// Executors.newScheduledThreadPool(1).scheduleAtFixedRate(() -> { +// pdClient.forceReconnect(); +// }, 1, 2, TimeUnit.SECONDS); + } + + private void listenWithObserverId(long observerId) { + Pulse pulse; + if (observerId > 0L) { + pulse = 
pdClient.getPulse(observerId); + } else { + pulse = pdClient.getPulse(); + } + + CountDownLatch latch = new CountDownLatch(100000); + + PulseNotifier notifier1 = pulse.connect(new PulseListenerTest(latch, "test-listener")); + + try { + latch.await(3600 * 24, TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + try { + notifier1.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + //@Test + public void notifyServer() { + CountDownLatch latch = new CountDownLatch(100); + Pulse pulse = pdClient.getPulse(); + PulseNotifier notifier = + pulse.connect(new PulseListenerTest<>(latch, "test-listener")); + for (int i = 0; i < 100; i++) { + HgPDTestUtil.println("Notifying server [" + i + "] times."); + notifier.notifyServer(PartitionHeartbeatRequest.newBuilder().setStates( + Metapb.PartitionStats.newBuilder().setId(i) + )); + } + + } + + private class PulseListenerTest implements PulseListener { + CountDownLatch latch = new CountDownLatch(10); + private String listenerName; + + private PulseListenerTest(CountDownLatch latch, String listenerName) { + this.latch = latch; + this.listenerName = listenerName; + } + + @Override + public void onNext(T response) { + // println(this.listenerName+" res: "+response); + // this.latch.countDown(); + } + + @Override + public void onNotice(PulseServerNotice notice) { + try { + log("Sleeping for ACK, noticeID: " + notice.getNoticeId()); + Thread.sleep(100); + // TimeUnit.SECONDS.sleep(15); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + notice.ack(); + this.latch.countDown(); + } + + @Override + public void onError(Throwable throwable) { + HgPDTestUtil.println(this.listenerName + " error: " + throwable.toString()); + } + + @Override + public void onCompleted() { + HgPDTestUtil.println(this.listenerName + " is completed"); + } + + + } + + public static void log(String s) { + LocalDateTime now = LocalDateTime.now(); + DateTimeFormatter formatter = 
DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); + + StringBuffer sb = new StringBuffer(); + sb.append(now.format(formatter)).append(" ").append(s); + + System.out.println(sb); + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDWatchTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDWatchTest.java new file mode 100644 index 0000000000..2d8eed152e --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDWatchTest.java @@ -0,0 +1,79 @@ +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.watch.WatchListener; +import org.apache.hugegraph.pd.watch.Watcher; + +import org.apache.hugegraph.pd.BaseTest; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.Closeable; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +public class PDWatchTest extends BaseClientTest { + + @BeforeClass + public static void beforeClass() { + PDConfig pdConfig = PDConfig.of("localhost:8686,localhost:8687,localhost:8688") + .setAuthority(BaseTest.user, BaseTest.pwd); + pdConfig.setEnableCache(true); + pdClient = PDClient.create(pdConfig); + try { + pdClient.getLeader(); + } catch (PDException e) { + e.printStackTrace(); + } + } + + @Test + public void watch() throws IOException { + Watcher watch = pdClient.getWatchClient(); + CountDownLatch latch = new CountDownLatch(10000); + + Closeable watcher1 = watch.watchNode(new WatchListenerTest(latch, "node-watcher")); + // PDWatch.Watcher watcher2 = watch.watchPartition(new WatchListener(latch, "watcher2")); + // PDWatch.Watcher watcher3 = watch.watchPartition(new WatchListener(latch, "watcher3")); + + // PDWatch.Watcher nodeWatcher1 = watch.watchNode(new WatchListener(latch, "nodeWatcher1")); + + try { + latch.await(15000, 
TimeUnit.SECONDS); + } catch (InterruptedException e) { + e.printStackTrace(); + } + watcher1.close(); + // watcher2.close(); + // watcher3.close(); + } + + private class WatchListenerTest implements WatchListener { + + private final CountDownLatch latch; + private final String name; + + public WatchListenerTest(CountDownLatch latch, String s) { + this.latch = latch; + this.name = s; + } + + @Override + public void onNext(T paramT) { + + } + + @Override + public void onError(Throwable paramThrowable) { + latch.countDown(); + } + @Override + public void onCompleted() { + latch.countDown(); + } + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java new file mode 100644 index 0000000000..dce20b413c --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java @@ -0,0 +1,17 @@ +package org.apache.hugegraph.pd.common; + +import org.junit.After; +import org.junit.BeforeClass; + +public class BaseCommonTest { + + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java new file mode 100644 index 0000000000..d3c4cfb5f5 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java @@ -0,0 +1,25 @@ +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.service.IdServiceTest; +import org.apache.hugegraph.pd.service.KvServiceTest; +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + PartitionUtilsTest.class, + PartitionCacheTest.class, + MetadataKeyHelperTest.class, + KvServiceTest.class, + 
HgAssertTest.class, + KVPairTest.class, + IdServiceTest.class +}) + +@Slf4j +public class CommonSuiteTest { + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java new file mode 100644 index 0000000000..c3994214f7 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java @@ -0,0 +1,136 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +======== +package org.apache.hugegraph.pd.common; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java + +package org.apache.hugegraph.pd.common; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.util.ArrayList; +import java.util.HashMap; + +import org.junit.Test; + +public class HgAssertTest { + + @Test(expected = IllegalArgumentException.class) + public void testIsTrue() { + HgAssert.isTrue(false, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsTrue2() { + HgAssert.isTrue(true, null); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsFalse() { + HgAssert.isFalse(true, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsFalse2() { + HgAssert.isTrue(false, null); + } + + @Test(expected = IllegalArgumentException.class) + public void isArgumentValid() { + HgAssert.isArgumentValid(new byte[0], ""); + } + + @Test(expected = IllegalArgumentException.class) + public void isArgumentValidStr() { + HgAssert.isArgumentValid("", ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsArgumentNotNull() { + HgAssert.isArgumentNotNull(null, ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIstValid() { + HgAssert.istValid(new byte[0], ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIstValidStr() { + HgAssert.isValid("", ""); + } + + @Test(expected = IllegalArgumentException.class) + public void testIsNotNull() { + HgAssert.isNotNull(null, ""); + } + + @Test + public void testIsInvalid() { + assertFalse(HgAssert.isInvalid("abc", "test")); + assertTrue(HgAssert.isInvalid("", null)); + } + + @Test + public void testIsInvalidByte() { + assertTrue(HgAssert.isInvalid(new byte[0])); + assertFalse(HgAssert.isInvalid(new byte[1])); + } + + @Test + public void testIsInvalidMap() { + 
assertTrue(HgAssert.isInvalid(new HashMap())); + assertFalse(HgAssert.isInvalid(new HashMap() {{ + put(1, 1); + }})); + } + + @Test + public void testIsInvalidCollection() { + assertTrue(HgAssert.isInvalid(new ArrayList())); + assertFalse(HgAssert.isInvalid(new ArrayList() {{ + add(1); + }})); + } + + @Test + public void testIsContains() { + assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, + Long.valueOf(2))); + assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)}, + Long.valueOf(3))); + } + + @Test + public void testIsContainsT() { + assertTrue(HgAssert.isContains(new ArrayList<>() {{ + add(1); + }}, 1)); + assertFalse(HgAssert.isContains(new ArrayList<>() {{ + add(1); + }}, 2)); + } + + @Test + public void testIsNull() { + assertTrue(HgAssert.isNull(null)); + assertFalse(HgAssert.isNull("abc", "cdf")); + } + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java new file mode 100644 index 0000000000..054fe5cf15 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java @@ -0,0 +1,56 @@ +package org.apache.hugegraph.pd.common; + +import org.junit.Before; +import org.junit.Test; + +import java.util.Objects; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class KVPairTest { + + KVPair pair; + @Before + public void init(){ + pair = new KVPair<>("key", 1); + } + + @Test + public void testGetKey(){ + assertEquals(pair.getKey(), "key"); + } + + @Test + public void testSetKey(){ + pair.setKey("key2"); + assertEquals(pair.getKey(), "key2"); + } + + @Test + public void testGetValue(){ + assertTrue(Objects.equals(pair.getValue(), 1)); + } + + @Test + public void testSetValue(){ + pair.setValue(2); + assertTrue(Objects.equals(pair.getValue(), 2)); + } + + @Test + public void testToString(){ + + } + + @Test + public void 
testHashCode(){ + + } + + @Test + public void testEquals(){ + var pair2 = new KVPair<>("key", 1); + assertTrue(pair2.equals(pair)); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java new file mode 100644 index 0000000000..e8e90949f4 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java @@ -0,0 +1,233 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.core.meta; + +======== +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertArrayEquals; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.junit.Test; + +public class MetadataKeyHelperTest { + + @Test + public void testMoveTaskKey() { + var key = MetadataKeyHelper.getMoveTaskKey("foo", 0, 1); + assertArrayEquals(key, "TASK_MOVE/foo/0/1".getBytes()); + var key2 = MetadataKeyHelper.getMoveTaskPrefix("foo"); + assertArrayEquals(key2, "TASK_MOVE/foo".getBytes()); + } + + @Test + public void testGetStoreInfoKey() { + assertThat(MetadataKeyHelper.getStoreInfoKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetActiveStoreKey() { + assertThat(MetadataKeyHelper.getActiveStoreKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetActiveStorePrefix() { + assertThat(MetadataKeyHelper.getActiveStorePrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetStorePrefix() { + assertThat(MetadataKeyHelper.getStorePrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetStoreStatusKey() { + assertThat(MetadataKeyHelper.getStoreStatusKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardGroupKey() { + assertThat(MetadataKeyHelper.getShardGroupKey(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardGroupPrefix() { + assertThat(MetadataKeyHelper.getShardGroupPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void 
testGetPartitionKey() { + assertThat(MetadataKeyHelper.getPartitionKey("graphName", 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionPrefix() { + assertThat(MetadataKeyHelper.getPartitionPrefix("graphName")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardKey() { + assertThat(MetadataKeyHelper.getShardKey(0L, 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetShardPrefix() { + assertThat(MetadataKeyHelper.getShardPrefix(0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphKey() { + assertThat(MetadataKeyHelper.getGraphKey("graphName")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphPrefix() { + assertThat(MetadataKeyHelper.getGraphPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionStatusKey() { + assertThat(MetadataKeyHelper.getPartitionStatusKey("graphName", + 0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPartitionStatusPrefixKey() { + assertThat(MetadataKeyHelper.getPartitionStatusPrefixKey( + "graphName")).contains(MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetGraphSpaceKey() { + assertThat(MetadataKeyHelper.getGraphSpaceKey("graphSpace")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetPdConfigKey() { + assertThat(MetadataKeyHelper.getPdConfigKey("configKey")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetQueueItemPrefix() { + assertThat(MetadataKeyHelper.getQueueItemPrefix()).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetQueueItemKey() { + assertThat(MetadataKeyHelper.getQueueItemKey("itemId")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetSpitTaskKey() { + assertThat(MetadataKeyHelper.getSplitTaskKey("graphName", 
0)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetSpitTaskPrefix() { + assertThat(MetadataKeyHelper.getSplitTaskPrefix("graph0")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetLogKey() { + // Setup + final Metapb.LogRecord record = Metapb.LogRecord.newBuilder() + .setAction("value") + .setTimestamp(0L) + .build(); + + // Run the test + final byte[] result = MetadataKeyHelper.getLogKey(record); + + // Verify the results + assertThat(result).contains(MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetLogKeyPrefix() { + assertThat(MetadataKeyHelper.getLogKeyPrefix("action", 0L)).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVPrefix() { + assertThat(MetadataKeyHelper.getKVPrefix("prefix", "key")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVTTLPrefix() { + assertThat(MetadataKeyHelper.getKVTTLPrefix("ttlPrefix", "prefix", + "key")).contains( + MetadataKeyHelper.getDelimiter()); + } + + @Test + public void testGetKVWatchKeyPrefix1() { + assertThat( + MetadataKeyHelper.getKVWatchKeyPrefix("key", "watchDelimiter", + 0L)).contains( + String.valueOf(MetadataKeyHelper.getDelimiter())); + } + + @Test + public void testGetKVWatchKeyPrefix2() { + assertThat(MetadataKeyHelper.getKVWatchKeyPrefix("key", + "watchDelimiter")).contains( + String.valueOf(MetadataKeyHelper.getDelimiter())); + } + + @Test + public void testGetDelimiter() { + assertThat(MetadataKeyHelper.getDelimiter()).isEqualTo('/'); + } + + @Test + public void testGetStringBuilderHelper() { + try { + MetadataKeyHelper.getStringBuilderHelper(); + } catch (Exception e) { + + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java new file mode 100644 index 0000000000..e25d0e6c83 --- /dev/null +++ 
b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java @@ -0,0 +1,374 @@ +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class PartitionCacheTest { + + private PartitionCache cache ; + + @Before + public void setup(){ + cache = new PartitionCache(); + } + + @Test + public void testGetPartitionById(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + } + + @Test + public void testGetPartitionByKey() throws UnsupportedEncodingException { + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + var ret = cache.getPartitionByKey("graph0", "0".getBytes("utf-8")); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + } + + @Test + public void getPartitionByCode(){ + var partition = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + var ret = cache.getPartitionByCode("graph0", 10); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNull(cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testGetPartitions(){ + var partition1 = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition1); + 
assertEquals(cache.getPartitions("graph0").size(), 1); + var partition2 = createPartition(1, "graph0", 1024, 2048); + cache.updateShardGroup(creteShardGroup(1)); + cache.updatePartition(partition2); + assertEquals(cache.getPartitions("graph0").size(), 2); + System.out.print(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testAddPartition(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.addPartition("graph0", 0, partition); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNotNull(cache.getPartitionByCode("graph0", 2000)); + System.out.print(cache.debugCacheByGraphName("graph0")); + var partition2 = createPartition(0, "graph0", 0, 1024); + cache.addPartition("graph0", 0, partition2); + ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition2); + assertNull(cache.getPartitionByCode("graph0", 2000)); + System.out.print(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testUpdatePartition(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + cache.addPartition("graph0", 0, partition); + var partition2 = createPartition(0, "graph0", 0, 1024); + cache.updatePartition("graph0", 0, partition2); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition2); + assertNull(cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testUpdatePartition2(){ + var partition = createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + assertTrue(cache.updatePartition(partition)); + assertFalse(cache.updatePartition(partition)); + var ret = cache.getPartitionById("graph0", 0); + assertNotNull(ret); + assertEquals(ret.getKey(), partition); + assertNull(cache.getPartitionByCode("graph0", 2000)); + } + + @Test + public void testRemovePartition(){ + var partition = 
createPartition(0, "graph0", 0, 1024); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition); + assertNotNull(cache.getPartitionById("graph0", 0)); + cache.removePartition("graph0", 0); + assertNull(cache.getPartitionById("graph0", 0)); + System.out.print(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testRange(){ + var partition1 = createPartition(1, "graph0", 0, 3); + var partition2 = createPartition(2, "graph0", 3, 6); + cache.updatePartition(partition1); + cache.updatePartition(partition2); + + var partition3 = createPartition(3, "graph0", 1, 2); + var partition4 = createPartition(4, "graph0", 2, 3); + + cache.updatePartition(partition3); + cache.updatePartition(partition4); + System.out.println(cache.debugCacheByGraphName("graph0")); + var partition6 = createPartition(1, "graph0", 0, 1); + cache.updatePartition(partition6); + + + System.out.println(cache.debugCacheByGraphName("graph0")); + + var partition5 = createPartition(1, "graph0", 0, 3); + cache.updatePartition(partition5); + System.out.println(cache.debugCacheByGraphName("graph0")); + } + + @Test + public void testRange2(){ + var partition1 = createPartition(1, "graph0", 0, 3); + var partition2 = createPartition(2, "graph0", 3, 6); + cache.updatePartition(partition1); + cache.updatePartition(partition2); + + System.out.println(cache.debugCacheByGraphName("graph0")); + + // 中间有缺失 + var partition3 = createPartition(1, "graph0", 2, 3); + cache.updatePartition(partition3); + + System.out.println(cache.debugCacheByGraphName("graph0")); + + var partition5 = createPartition(1, "graph0", 0, 3); + cache.updatePartition(partition5); + System.out.println(cache.debugCacheByGraphName("graph0")); + } + + + @Test + public void testRemovePartitions(){ + var partition1 = createPartition(0, "graph0", 0, 1024); + var partition2 = createPartition(1, "graph0", 1024, 2048); + cache.updateShardGroup(creteShardGroup(0)); + cache.updatePartition(partition1); + 
cache.updateShardGroup(creteShardGroup(1)); + cache.updatePartition(partition2); + assertEquals(cache.getPartitions("graph0").size(), 2); + cache.removePartitions(); + assertEquals(cache.getPartitions("graph0").size(), 0); + } + + + + @Test + public void testRemoveAll(){ + var partition1 = createPartition(0, "graph0", 0, 1024); + var partition2 = createPartition(1, "graph0", 1024, 2048); + var partition3 = createPartition(0, "graph1", 0, 2048); + cache.updateShardGroup(creteShardGroup(0)); + cache.updateShardGroup(creteShardGroup(1)); + cache.updatePartition(partition1); + cache.updatePartition(partition2); + cache.updatePartition(partition3); + + assertEquals(cache.getPartitions("graph0").size(), 2); + assertEquals(cache.getPartitions("graph1").size(), 1); + cache.removeAll("graph0"); + assertEquals(cache.getPartitions("graph0").size(), 0); + assertEquals(cache.getPartitions("graph1").size(), 1); + } + + @Test + public void testUpdateShardGroup(){ + var shardGroup = createShardGroup(); + cache.updateShardGroup(shardGroup); + assertNotNull(cache.getShardGroup(shardGroup.getId())); + } + + @Test + public void testGetShardGroup(){ + var shardGroup = createShardGroup(); + cache.updateShardGroup(shardGroup); + assertTrue(Objects.equals(cache.getShardGroup(shardGroup.getId()), shardGroup)); + } + + @Test + public void testAddStore(){ + var store = createStore(1); + cache.addStore(1L, store); + assertEquals(cache.getStoreById(1L), store); + } + + @Test + public void testGetStoreById(){ + var store = createStore(1); + cache.addStore(1L, store); + assertEquals(cache.getStoreById(1L), store); + } + + @Test + public void testRemoveStore(){ + var store = createStore(1); + cache.addStore(1L, store); + assertEquals(cache.getStoreById(1L), store); + + cache.removeStore(1L); + assertNull(cache.getStoreById(1L)); + } + + @Test + public void testHasGraph(){ + var partition = createPartition(0, "graph0", 0, 65535); + cache.updateShardGroup(creteShardGroup(0)); + 
cache.updatePartition(partition); + assertTrue(cache.hasGraph("graph0")); + assertFalse(cache.hasGraph("graph1")); + } + + @Test + public void testUpdateGraph(){ + var graph = createGraph("graph0", 10); + cache.updateGraph(graph); + assertEquals(cache.getGraph("graph0"), graph); + graph = createGraph("graph0", 12); + cache.updateGraph(graph); + assertEquals(cache.getGraph("graph0"), graph); + } + + @Test + public void testGetGraph(){ + var graph = createGraph("graph0", 12); + cache.updateGraph(graph); + assertEquals(cache.getGraph("graph0"), graph); + } + + @Test + public void testGetGraphs(){ + var graph1 = createGraph("graph0", 12); + var graph2 = createGraph("graph1", 12); + var graph3 = createGraph("graph2", 12); + cache.updateGraph(graph1); + cache.updateGraph(graph2); + cache.updateGraph(graph3); + assertEquals(cache.getGraphs().size(), 3); + } + + @Test + public void testReset(){ + var graph1 = createGraph("graph0", 12); + var graph2 = createGraph("graph1", 12); + var graph3 = createGraph("graph2", 12); + cache.updateGraph(graph1); + cache.updateGraph(graph2); + cache.updateGraph(graph3); + assertEquals(cache.getGraphs().size(), 3); + cache.reset(); + assertEquals(cache.getGraphs().size(), 0); + } + + @Test + public void testUpdateShardGroupLeader(){ + var shardGroup = createShardGroup(); + cache.updateShardGroup(shardGroup); + + var leader = Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build(); + cache.updateShardGroupLeader(shardGroup.getId(), leader); + + assertEquals(cache.getLeaderShard(shardGroup.getId()), leader); + } + + private static Metapb.Partition createPartition(int pid, String graphName, long start, long end){ + return Metapb.Partition.newBuilder() + .setId(pid) + .setGraphName(graphName) + .setStartKey(start) + .setEndKey(end) + .setState(Metapb.PartitionState.PState_Normal) + .setVersion(1) + .build(); + } + + private static Metapb.ShardGroup creteShardGroup(int pid) { + return Metapb.ShardGroup.newBuilder() + 
.addShards( + Metapb.Shard.newBuilder().setStoreId(0).setRole(Metapb.ShardRole.Leader).build() + ) + .setId(pid) + .setVersion(0) + .setConfVer(0) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.Shard createShard(){ + return Metapb.Shard.newBuilder() + .setStoreId(0) + .setRole(Metapb.ShardRole.Leader) + .build(); + } + + private static Metapb.Store createStore(long storeId){ + return Metapb.Store.newBuilder() + .setId(storeId) + .setAddress("127.0.0.1") + .setCores(4) + .setVersion("1") + .setDataPath("/tmp/junit") + .setDataVersion(1) + .setLastHeartbeat(System.currentTimeMillis()) + .setStartTimestamp(System.currentTimeMillis()) + .setState(Metapb.StoreState.Up) + .setDeployPath("/tmp/junit") + .build(); + } + + private static Metapb.Graph createGraph(String graphName, int partitionCount){ + return Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + .setState(Metapb.PartitionState.PState_Normal) + .build(); + } + + private static Metapb.ShardGroup createShardGroup(){ + List shards = new ArrayList<>() ; + for (int i = 0 ; i < 3 ; i ++ ) { + shards.add(Metapb.Shard.newBuilder() + .setStoreId(i) + .setRole( i == 0 ? 
Metapb.ShardRole.Leader : Metapb.ShardRole.Follower) + .build() + ); + } + + return Metapb.ShardGroup.newBuilder() + .setId(1) + .setVersion(1) + .setConfVer(1) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards) + .build(); + } + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java new file mode 100644 index 0000000000..cb60bc48ea --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java @@ -0,0 +1,16 @@ +package org.apache.hugegraph.pd.common; + +import lombok.extern.slf4j.Slf4j; +import org.junit.Assert; +import org.junit.Test; + + +@Slf4j +public class PartitionUtilsTest extends BaseCommonTest { + @Test + public void testCalcHashcode() { + byte[] key = new byte[5]; + long code = PartitionUtils.calcHashcode(key); + Assert.assertEquals(code, 31912L); + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java new file mode 100644 index 0000000000..deb867d071 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/BaseCoreTest.java @@ -0,0 +1,61 @@ +package org.apache.hugegraph.pd.core; + +import java.io.File; +import java.io.IOException; + +import org.apache.commons.io.FileUtils; +import org.apache.hugegraph.pd.BaseTest; +import org.junit.After; +import org.junit.BeforeClass; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.config.PDConfig; + + +public class BaseCoreTest extends BaseTest { + + static PDConfig pdConfig; + + static ConfigService configService; + + @BeforeClass + public static void init() throws Exception { + String path = "tmp/unitTest"; + deleteDirectory(new File(path)); + pdConfig = new PDConfig() {{ + this.setClusterId(100); + 
this.setInitialStoreList("127.0.0.1:8500,127.0.0.1:8501,127.0.0.1:8502," + + "127.0.0.1:8503,127.0.0.1:8504,127.0.0.1:8505"); + }}; + + pdConfig.setStore(new PDConfig().new Store() {{ + this.setMaxDownTime(3600); + this.setKeepAliveTimeout(3600); + }}); + + pdConfig.setPartition(new PDConfig().new Partition() {{ + this.setShardCount(3); + this.setMaxShardsPerStore(3); + }}); + pdConfig.setRaft(new PDConfig().new Raft() {{ + this.setEnable(false); + }}); + pdConfig.setDiscovery(new PDConfig().new Discovery()); + pdConfig.setDataPath(path); + configService = new ConfigService(pdConfig); + pdConfig = configService.loadConfig(); + } + + public static void deleteDirectory(File dir) { + try { + FileUtils.deleteDirectory(dir); + } catch (IOException e) { + System.out.println(String.format("Failed to start ....,%s", e.getMessage())); + } + } + + @After + public void teardown() { + // pass + } +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java new file mode 100644 index 0000000000..a6060b32e3 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreSuiteTest.java @@ -0,0 +1,19 @@ +package org.apache.hugegraph.pd.core; + +import org.apache.hugegraph.pd.core.meta.MetadataKeyHelperTest; +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + StoreNodeServiceTest.class, + MetadataKeyHelperTest.class +}) + +@Slf4j +public class PDCoreSuiteTest { + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java new file mode 100644 index 0000000000..1152b70ba0 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreNodeServiceTest.java @@ -0,0 +1,104 @@ 
+package org.apache.hugegraph.pd.core; + +import java.util.ArrayList; +import java.util.List; + +import org.junit.Assert; +import org.junit.Test; + +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; + +@Slf4j +public class StoreNodeServiceTest extends BaseCoreTest { + + + @Test + public void testStoreNodeService() throws PDException { + Assert.assertEquals(configService.getPartitionCount(DEFAULT_STORE_GROUP_ID), + pdConfig.getInitialStoreMap().size() * + pdConfig.getPartition().getMaxShardsPerStore() + / pdConfig.getPartition().getShardCount()); + StoreNodeService storeService = new StoreNodeService(pdConfig); + storeService.init(new PartitionService(pdConfig, storeService, configService)); + int count = 6; + Metapb.Store[] stores = new Metapb.Store[count]; + for (int i = 0; i < count; i++) { + Metapb.Store store = Metapb.Store.newBuilder() + .setId(0) + .setAddress("127.0.0.1:850" + i) + .setDeployPath("/data") + .addLabels(Metapb.StoreLabel.newBuilder() + .setKey("namespace") + .setValue("default").build()) + .build(); + stores[i] = storeService.register(store); + System.out.println("新注册store, id = " + stores[i].getId()); + } + Assert.assertEquals(count, storeService.getStores("").size()); + + for (Metapb.Store store : stores) { + Metapb.StoreStats stats = Metapb.StoreStats.newBuilder() + .setStoreId(store.getId()) + .build(); + storeService.heartBeat(stats); + } + + Assert.assertEquals(6, storeService.getActiveStoresByStoreGroup(DEFAULT_STORE_GROUP_ID).size()); + + Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("defaultGH") + .setPartitionCount(10) + .build(); + // 分配shard + List shards = storeService.allocShards(graph, 1); + + + Assert.assertEquals(3, shards.size()); + // 设置leader + 
Assert.assertEquals(configService.getPartitionCount(DEFAULT_STORE_GROUP_ID), + storeService.getShardGroups().size()); + Metapb.Shard leader = Metapb.Shard.newBuilder(shards.get(0)) + .setRole(Metapb.ShardRole.Leader).build(); + shards = new ArrayList<>(shards); + shards.set(0, leader); + // 增加shard + pdConfig.getPartition().setShardCount(5); + + Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(1) + .addAllShards(shards).build(); + shards = storeService.reallocShards(shardGroup); + + Assert.assertEquals(5, shards.size()); + // 减少shard + pdConfig.getPartition().setShardCount(3); + shards = storeService.reallocShards(shardGroup); + Assert.assertEquals(3, shards.size()); + // 包含leader,leader不能被删除 + Assert.assertTrue(shards.contains(leader)); + + // 减少shard + pdConfig.getPartition().setShardCount(1); + graph = Metapb.Graph.newBuilder(graph).build(); + shards = storeService.reallocShards(shardGroup); + Assert.assertEquals(1, shards.size()); + // 包含leader,leader不能被删除 + Assert.assertTrue(shards.contains(leader)); + + for (Metapb.Store store : stores) { + storeService.removeStore(store.getId()); + } + Assert.assertEquals(0, storeService.getStores("").size()); + + + } + + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java new file mode 100644 index 0000000000..d495c747db --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java @@ -0,0 +1,19 @@ +package org.apache.hugegraph.pd.core.meta; + +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.junit.Test; + +import java.util.Arrays; + +import static org.junit.Assert.assertTrue; + +public class MetadataKeyHelperTest { + + @Test + public void testMoveTaskKey(){ + var key = MetadataKeyHelper.getMoveTaskKey("foo", 0, 1); + assertTrue(Arrays.equals(key, 
"TASK_MOVE/foo/0/1".getBytes())); + var key2 = MetadataKeyHelper.getMoveTaskPrefix("foo"); + assertTrue(Arrays.equals(key2, "TASK_MOVE/foo".getBytes())); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java new file mode 100644 index 0000000000..e2627b4c18 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/BaseGrpcTest.java @@ -0,0 +1,20 @@ +package org.apache.hugegraph.pd.grpc; + +import org.apache.hugegraph.pd.BaseTest; +import org.junit.After; +import org.junit.BeforeClass; + + +public class BaseGrpcTest extends BaseTest { + + @BeforeClass + public static void init() { + + } + + @After + public void teardown() { + // pass + } + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java new file mode 100644 index 0000000000..acb4b12509 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/grpc/GrpcSuiteTest.java @@ -0,0 +1,15 @@ +package org.apache.hugegraph.pd.grpc; + +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ +}) + +@Slf4j +public class GrpcSuiteTest { + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java new file mode 100644 index 0000000000..c54fb22056 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/BaseServerTest.java @@ -0,0 +1,45 @@ +package org.apache.hugegraph.pd.service; + +import java.io.File; +import java.net.http.HttpClient; + +import org.apache.commons.io.FileUtils; +import org.junit.After; +import org.junit.BeforeClass; + +import org.apache.hugegraph.pd.BaseTest; +import org.apache.hugegraph.pd.config.PDConfig; + + 
+public class BaseServerTest extends BaseTest { + + public static HttpClient client; + + @BeforeClass + public static void init() { + client = HttpClient.newHttpClient(); + } + + public static PDConfig getConfig() { + FileUtils.deleteQuietly(new File("tmp/test/")); + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setPartition(new Partition() {{ + setShardCount(1); + setMaxShardsPerStore(12); + }}); + this.setDataPath("tmp/test/"); + }}; + return pdConfig; + } + + @After + public void teardown() { + // pass + } + +} \ No newline at end of file diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java new file mode 100644 index 0000000000..63a8d32b78 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java @@ -0,0 +1,144 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.core; + +import java.util.List; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +======== +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.common.PDException; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java +public class ConfigServiceTest extends PDCoreTestBase { +======== +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class ConfigServiceTest { + + private PDConfig config = BaseServerTest.getConfig(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java + + private ConfigService service; + + @Before + public void setUp() { + this.service = new ConfigService(getPdConfig()); + } + + @Test + public void testGetPDConfig() { + // Setup + try { + final Metapb.PDConfig config = Metapb.PDConfig.newBuilder() + .setVersion(0L) + .setShardCount(55) + .setMaxShardsPerStore(0) + .setTimestamp(0L).build(); + this.service.setPDConfig(config); + + // Run the test + Metapb.PDConfig result = this.service.getPDConfig(0L); + + // Verify the results + Assert.assertEquals(55, result.getShardCount()); + result = this.service.getPDConfig(); + Assert.assertEquals(55, result.getShardCount()); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testGetGraphSpace() throws Exception { + // Setup + Metapb.GraphSpace space = Metapb.GraphSpace.newBuilder() + .setName("gs1") + .setTimestamp(0L).build(); + 
this.service.setGraphSpace(space); + + // Run the test + final List result = this.service.getGraphSpace("gs1"); + + // Verify the results + Assert.assertEquals(1, result.size()); + Assert.assertEquals(space.getName(), result.get(0).getName()); + } + + @Test + public void testUpdatePDConfig() { + try { + final Metapb.PDConfig mConfig = Metapb.PDConfig.newBuilder() + .setVersion(0L) + .setShardCount(0) + .setMaxShardsPerStore(0) + .setTimestamp(0L) + .build(); + final PDConfig expectedResult = new PDConfig(); + expectedResult.setConfigService(new ConfigService(new PDConfig())); + expectedResult.setIdService(new IdService(new PDConfig())); + expectedResult.setClusterId(0L); + expectedResult.setPatrolInterval(0L); + expectedResult.setDataPath("dataPath"); + expectedResult.setMinStoreCount(0); + expectedResult.setInitialStoreList("initialStoreList"); + expectedResult.setHost("host"); + expectedResult.setVerifyPath("verifyPath"); + expectedResult.setLicensePath("licensePath"); + this.service.updatePDConfig(mConfig); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testStoreGroup() throws PDException { + config.setInitialStoreList("192.168.1.1:8500,192.168.1.1:8501,192.168.1.2:8500/1"); + service.loadConfig(); + + assertEquals(2, service.getAllStoreGroup().size()); + var group1 = service.getStoreGroup(0); + assertEquals(24, group1.getPartitionCount()); + + var group2 = service.getStoreGroup(1); + assertEquals(12, group2.getPartitionCount()); + + service.updateStoreGroup(0, "DEFAULT"); + service.setPartitionCount(0, 36); + + group1 = service.getStoreGroup(0); + assertEquals(36, group1.getPartitionCount()); + assertEquals("DEFAULT", group1.getName()); + + service.createStoreGroup(2, "group2", 12); + assertEquals(3, service.getAllStoreGroup().size()); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java new file mode 100644 
index 0000000000..0e01dded54 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/IdServiceTest.java @@ -0,0 +1,91 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.meta.IdMetaStore; +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.Test; + +import java.io.File; + +public class IdServiceTest { + @Test + public void testCid() { + try{ + PDConfig pdConfig = BaseServerTest.getConfig(); + int max = 0x2000; + IdService idService = new IdService(pdConfig); + for (int i = 0; i < max; i++) { + idService.getCId("test", max); + } + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); + + Assert.assertEquals(1, idService.getCId("test", max)); + Assert.assertEquals(0x10, idService.getCId("test", max)); + Assert.assertEquals(0x100, idService.getCId("test", max)); + Assert.assertEquals(0x1000, idService.getCId("test", max)); + Assert.assertEquals(-1, idService.getCId("test", max)); + + idService.delCId("test", 1); + idService.delCId("test", 0x10); + idService.delCId("test", 0x100); + idService.delCId("test", 0x1000); + + long cid1 = idService.getCId("test", "name", max); + idService.delCIdDelay("test", "name", cid1); + long cid2 = idService.getCId("test", "name", max); + + Assert.assertEquals(cid1, cid2); + idService.delCIdDelay("test", "name", cid2); + Thread.sleep(5000); + long cid3 = idService.getCId("test", "name", max); + } catch (Exception e) { + + } + // MetadataFactory.closeStore(); + } + + @Test + public void testId() { + try{ + FileUtils.deleteQuietly(new File("tmp/testId/")); + IdMetaStore.CID_DEL_TIMEOUT = 2000; + PDConfig pdConfig = new PDConfig() {{ + this.setClusterId(100); + this.setPatrolInterval(1); + this.setRaft(new Raft() {{ + setEnable(false); + }}); + this.setDataPath("tmp/testId/"); + }}; + 
IdService idService = new IdService(pdConfig); + long first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + long second = idService.getId("abc", 100); + Assert.assertEquals(second, 100L); + idService.resetId("abc"); + first = idService.getId("abc", 100); + Assert.assertEquals(first, 0L); + } catch (Exception e) { + + } + // MetadataFactory.closeStore(); + } + @Test + public void testMember() { + try{ + PDConfig pdConfig = BaseServerTest.getConfig(); + IdService idService = new IdService(pdConfig); + idService.setPdConfig(pdConfig); + PDConfig config = idService.getPdConfig(); + config.getHost(); + } catch (Exception e) { + e.printStackTrace(); + } + // MetadataFactory.closeStore(); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java new file mode 100644 index 0000000000..06bfff7ee7 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/KvServiceTest.java @@ -0,0 +1,43 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.KvService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.junit.Assert; +import org.junit.Test; + +public class KvServiceTest { + + @Test + public void testKv() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + KvService service = new KvService(pdConfig); + String key = "kvTest"; + String kvTest = service.get(key); + Assert.assertEquals(kvTest, ""); + service.put(key, "kvTestValue"); + kvTest = service.get(key); + Assert.assertEquals(kvTest, "kvTestValue"); + service.scanWithPrefix(key); + service.delete(key); + service.put(key, "kvTestValue"); + service.deleteWithPrefix(key); + service.put(key, "kvTestValue", 1000L); + service.keepAlive(key); + } catch (Exception e) { + + } + } + + @Test + public void testMember() { + try { + PDConfig pdConfig = BaseServerTest.getConfig(); + KvService service = new KvService(pdConfig); + 
service.setPdConfig(pdConfig); + PDConfig config = service.getPdConfig(); + } catch (Exception e) { + e.printStackTrace(); + } + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java new file mode 100644 index 0000000000..d6befc7f9c --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/LogServiceTest.java @@ -0,0 +1,36 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import com.google.protobuf.Any; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +public class LogServiceTest { + + private PDConfig mockPdConfig = BaseServerTest.getConfig(); + + private LogService logServiceUnderTest; + + @Before + public void setUp() { + logServiceUnderTest = new LogService(mockPdConfig); + } + + @Test + public void testGetLog() throws Exception { + logServiceUnderTest.insertLog("action", "message", + Any.newBuilder().build()); + + // Run the test + final List result = logServiceUnderTest.getLog( + "action", 0L, System.currentTimeMillis()); + + // Verify the results + Assert.assertEquals(result.size(), 1); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java new file mode 100644 index 0000000000..20a1e65457 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PartitionServiceTest.java @@ -0,0 +1,120 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; 
+import org.apache.hugegraph.pd.grpc.pulse.CleanType; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +import static org.junit.Assert.assertEquals; + +public class PartitionServiceTest extends PdTestBase { + + private PartitionService service; + + @Before + public void init(){ + service = getPartitionService(); + } + + @Test + public void testCombinePartition() throws PDException { + buildEnv(); + // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 + service.combinePartition(DEFAULT_STORE_GROUP_ID, 4); + + var partition = service.getPartitionById("graph0", 0); + assertEquals(0, partition.getStartKey()); + assertEquals(5462, partition.getEndKey()); + + var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(11, tasks.size()); + + for (MetaTask.Task task : tasks){ + var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Success).build(); + getTaskService().reportTask(newTask); + } + + tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(0, tasks.size()); + } + + @Test + public void testCombinePartition2() throws PDException { + buildEnv(); + // 0, 1, 2-> 0, 3,4,5->1, 6,7,8 ->2, 9,10, 11-> 3 + service.combinePartition(DEFAULT_STORE_GROUP_ID, 4); + + var partition = service.getPartitionById("graph0", 0); + assertEquals(0, partition.getStartKey()); + assertEquals(5462, partition.getEndKey()); + + var tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(11, tasks.size()); + + for (MetaTask.Task task : tasks){ + var newTask = task.toBuilder().setState(MetaTask.TaskState.Task_Failure).build(); + getTaskService().reportTask(newTask); + } + + tasks = getStoreNodeService().getTaskInfoMeta().scanMoveTask("graph0"); + assertEquals(0, tasks.size()); + } + + @Test + public void testHandleCleanTask(){ + MetaTask.Task task = MetaTask.Task.newBuilder() + 
.setType(MetaTask.TaskType.Clean_Partition) + .setPartition(Metapb.Partition.newBuilder().setGraphName("foo").setId(0).build()) + .setCleanPartition(CleanPartition.newBuilder() + .setCleanType(CleanType.CLEAN_TYPE_KEEP_RANGE) + .setDeletePartition(true) + .setKeyStart(0) + .setKeyEnd(10) + .build()) + .build(); + getTaskService().reportTask(task); + } + + private void buildEnv() throws PDException { + var graph = service.getGraph("graph0"); + if (graph == null) { + service.createGraph("graph0", 0, 0); + } + + var storeInfoMeta = getStoreNodeService().getStoreInfoMeta(); + storeInfoMeta.updateStore(Metapb.Store.newBuilder() + .setId(99) + .setState(Metapb.StoreState.Up) + .build()); + + getStoreNodeService().updateStoreGroupRelation(99, 0); + + long lastId = 0; + for (int i = 0; i < 12; i++){ + Metapb.Shard shard = Metapb.Shard.newBuilder() + .setStoreId(99) + .setRole(Metapb.ShardRole.Leader) + .build(); + + Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(i) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(List.of(shard)) + .build(); + storeInfoMeta.updateShardGroup(shardGroup); + + var partitionShard = service.getPartitionByCode("graph0", lastId); + if (partitionShard != null){ + lastId = partitionShard.getPartition().getEndKey(); + } + } + + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java new file mode 100644 index 0000000000..a5cd15cbb4 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java @@ -0,0 +1,268 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hugegraph.pd.core; + +import java.io.File; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.PartitionStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +public class PDCoreTestBase { + + private static final String DATA_PATH = "/tmp/pd_data"; + private static PDConfig pdConfig; +======== +package org.apache.hugegraph.pd.service; + +import 
org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.listener.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.listener.PartitionStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.raft.RaftEngine; +import lombok.Getter; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.File; + +public class PdTestBase { + @Getter + private static PDConfig pdConfig; + + @Getter +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java + private static StoreNodeService storeNodeService; + @Getter + private static PartitionService partitionService; + @Getter + private static TaskScheduleService taskService; + @Getter + private static StoreMonitorDataService storeMonitorDataService; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java +======== + @Getter + private static ConfigService configService; + + private static final String DATA_PATH = "/tmp/pd_data"; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java 
+ @BeforeClass + public static void initService() throws PDException { + deleteDir(new File(DATA_PATH)); + + PDConfig config = new PDConfig(); + config.setDataPath(DATA_PATH); + config.setMinStoreCount(3); + config.setInitialStoreList("127.0.0.1:8501"); + config.setHost("127.0.0.1"); + config.setVerifyPath(""); + config.setLicensePath(""); + + PDConfig.Raft raft = new PDConfig().new Raft(); + raft.setAddress("127.0.0.1:8601"); + raft.setPeersList("127.0.0.1:8601"); + raft.setDataPath(DATA_PATH); + raft.setHost("127.0.0.1"); + raft.setGrpcPort(8688); + raft.setPort(8621); + + config.setRaft(raft); + config.setStore(new PDConfig().new Store()); + config.setPartition(new PDConfig().new Partition() {{ + setShardCount(1); + setMaxShardsPerStore(12); + }}); + config.setDiscovery(new PDConfig().new Discovery()); + + pdConfig = config; + + configService = new ConfigService(pdConfig); + configService.loadConfig(); + + var engine = RaftEngine.getInstance(); + engine.addStateListener(configService); + engine.init(pdConfig.getRaft()); + engine.waitingForLeader(5000); + + storeNodeService = new StoreNodeService(pdConfig); + partitionService = new PartitionService(pdConfig, storeNodeService, configService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService, configService); + var idService = new IdService(pdConfig); + storeMonitorDataService = new StoreMonitorDataService(pdConfig); + RaftEngine.getInstance().addStateListener(partitionService); + pdConfig.setIdService(idService); + + storeNodeService.init(partitionService); + partitionService.init(); + partitionService.addInstructionListener(new PartitionInstructionListener() { + @Override + public void changeShard(Metapb.Partition partition, ChangeShard changeShard) throws + PDException { + + } + + @Override + public void transferLeader(Metapb.Partition partition, + TransferLeader transferLeader) throws PDException { + + } + + @Override + public void splitPartition(Metapb.Partition partition, + 
SplitPartition splitPartition) throws PDException { + + } + + @Override + public void dbCompaction(Metapb.Partition partition, DbCompaction dbCompaction) throws + PDException { + + } + + @Override + public void movePartition(Metapb.Partition partition, + MovePartition movePartition) throws PDException { + + } + + @Override + public void cleanPartition(Metapb.Partition partition, + CleanPartition cleanPartition) throws PDException { + + } + + @Override + public void changePartitionKeyRange(Metapb.Partition partition, + PartitionKeyRange partitionKeyRange) + throws PDException { + + } + }); + + partitionService.addStatusListener(new PartitionStatusListener() { + @Override + public void onPartitionChanged(Metapb.Partition partition, + Metapb.Partition newPartition) { + + } + + @Override + public void onPartitionRemoved(Metapb.Partition partition) { + + } + }); + + storeNodeService.addStatusListener(new StoreStatusListener() { + @Override + public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, + Metapb.StoreState status) { + + } + + @Override + public void onGraphChange(Metapb.Graph graph, Metapb.GraphState stateOld, + Metapb.GraphState stateNew) { + + } + + @Override + public void onStoreRaftChanged(Metapb.Store store) { + + } + }); + + taskService.init(); + } + + @AfterClass + public static void shutdownService() { + var instance = RaftEngine.getInstance(); + if (instance != null) { + instance.shutDown(); + } + } + + private static boolean deleteDir(File dir) { + if (dir.isDirectory()) { + for (File file : dir.listFiles()) { + deleteDir(file); + } + } + return dir.delete(); + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java + public static StoreNodeService getStoreNodeService() { + return storeNodeService; + } + + public static PartitionService getPartitionService() { + return partitionService; + } + + public static PDConfig getPdConfig() { + return pdConfig; + } + + public static 
TaskScheduleService getTaskService() { + return taskService; + } + + public static StoreMonitorDataService getStoreMonitorDataService() { + return storeMonitorDataService; + } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java new file mode 100644 index 0000000000..06ad865eb9 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java @@ -0,0 +1,167 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.rest; +======== +package org.apache.hugegraph.pd.service; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; + +import org.json.JSONException; +import org.json.JSONObject; +import org.junit.Test; + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java +======== +/** + * @author tianxiaohui + * @date 20221220 + **/ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java +public class RestApiTest extends BaseServerTest { + + @Test + public void testQueryClusterInfo() throws URISyntaxException, IOException, InterruptedException, + JSONException { + String url = pdRestAddr + "/v1/cluster"; + HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + .GET() + .build(); + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + JSONObject obj = new JSONObject(response.body()); + assert obj.getInt("status") == 0; + } + + @Test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + public void testQueryClusterMembers() throws URISyntaxException, IOException, + InterruptedException, JSONException { + String url = pdRestAddr + "/v1/members"; + HttpRequest request = HttpRequest.newBuilder() + .uri(new URI(url)) +======== + public void testQueryClusterMembers() throws URISyntaxException, IOException, InterruptedException, + JSONException { + String url = pdRestAddr + 
"/v1/members"; + HttpRequest request = HttpRequest.newBuilder() + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + .GET() + .build(); + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + JSONObject obj = new JSONObject(response.body()); + assert obj.getInt("status") == 0; + } + + @Test + public void testQueryStoresInfo() throws URISyntaxException, IOException, InterruptedException, + JSONException { + String url = pdRestAddr + "/v1/stores"; + HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + .GET() + .build(); + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + JSONObject obj = new JSONObject(response.body()); + assert obj.getInt("status") == 0; + } + + @Test + public void testQueryGraphsInfo() throws IOException, InterruptedException, JSONException, + URISyntaxException { + String url = pdRestAddr + "/v1/graphs"; + HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + .GET() + .build(); + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + JSONObject obj = new JSONObject(response.body()); + assert obj.getInt("status") == 0; + } + + @Test + public void testQueryPartitionsInfo() throws IOException, InterruptedException, JSONException, + URISyntaxException { + String url = pdRestAddr + 
"/v1/highLevelPartitions"; + HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + .GET() + .build(); + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + JSONObject obj = new JSONObject(response.body()); + assert obj.getInt("status") == 0; + } + + @Test + public void testQueryDebugPartitionsInfo() throws URISyntaxException, IOException, + InterruptedException { + String url = pdRestAddr + "/v1/partitions"; + HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + .GET() + .build(); + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + assert response.statusCode() == 200; + } + + @Test + public void testQueryShards() throws URISyntaxException, IOException, InterruptedException, + JSONException { + String url = pdRestAddr + "/v1/shards"; + HttpRequest request = HttpRequest.newBuilder() + .uri(new URI(url)) +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java + .GET() + .build(); +======== + .header(key, value) + .GET() + .build(); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + JSONObject obj = new JSONObject(response.body()); + assert obj.getInt("status") == 0; + } +} diff --git 
a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java new file mode 100644 index 0000000000..a38de53637 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ServerSuiteTest.java @@ -0,0 +1,24 @@ +package org.apache.hugegraph.pd.service; + +import lombok.extern.slf4j.Slf4j; +import org.junit.runner.RunWith; +import org.junit.runners.Suite; + + +@RunWith(Suite.class) +@Suite.SuiteClasses({ + RestApiTest.class, + ConfigServiceTest.class, + IdServiceTest.class, + KvServiceTest.class, + LogServiceTest.class, + StoreServiceTest.class, + StoreNodeServiceNewTest.class, + StoreMonitorDataServiceTest.class, + TaskScheduleServiceTest.class, + PartitionServiceTest.class +}) + +@Slf4j +public class ServerSuiteTest { +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java new file mode 100644 index 0000000000..a24d0ee0de --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreMonitorDataServiceTest.java @@ -0,0 +1,63 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class StoreMonitorDataServiceTest extends PdTestBase{ + + StoreMonitorDataService service; + + @Before + public void init(){ + service = getStoreMonitorDataService(); + var store = getPdConfig().getStore(); + store.setMonitorDataEnabled(true); + store.setMonitorDataInterval("1s"); + getPdConfig().setStore(store); + } + + @Test + public void test() throws 
InterruptedException, PDException { + long now = System.currentTimeMillis() / 1000; + for (int i = 0; i < 5; i++) { + service.saveMonitorData(genStats()); + now = System.currentTimeMillis() / 1000; + Thread.sleep(1100); + } + assertTrue(service.getLatestStoreMonitorDataTimeStamp(1) == 0 || + service.getLatestStoreMonitorDataTimeStamp(1) == now); + + var data = service.getStoreMonitorData(1); + assertEquals(5, data.size()); + + assertNotNull(service.debugMonitorInfo(List.of(Metapb.RecordPair.newBuilder() + .setKey("key1") + .setValue(1) + .build()))); + + assertNotNull(service.getStoreMonitorDataText(1)); + + + service.removeExpiredMonitorData(1, now + 1); + assertEquals(0, service.getStoreMonitorData(1).size()); + } + + + private Metapb.StoreStats genStats(){ + return Metapb.StoreStats.newBuilder() + .setStoreId(1) + .addSystemMetrics(Metapb.RecordPair.newBuilder().setKey("key1").setValue(1).build()) + .build(); + } + + +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java new file mode 100644 index 0000000000..1c6cb39168 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreNodeServiceNewTest.java @@ -0,0 +1,48 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +public class StoreNodeServiceNewTest extends PdTestBase{ + private StoreNodeService service; + + @Before + public void init(){ + service = getStoreNodeService(); + } + + @Test + public void testGetTaskInfoMeta(){ + assertNotNull(service.getTaskInfoMeta()); + } + + @Test + public void 
testGetStoreInfoMeta(){ + assertNotNull(service.getStoreInfoMeta()); + } + + @Test + public void testRemoveShardGroup() throws PDException { + for (int i = 0; i < 12; i++) { + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() + .setId(i) + .setState(Metapb.PartitionState.PState_Offline) + .build(); + service.getStoreInfoMeta().updateShardGroup(group); + } + + service.deleteShardGroup(11); + service.deleteShardGroup(10); + + assertEquals(10, getConfigService().getPartitionCount(DEFAULT_STORE_GROUP_ID)); + // restore + getPdConfig().getConfigService().setPartitionCount(DEFAULT_STORE_GROUP_ID, 12); + } +} diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java new file mode 100644 index 0000000000..16d9ba8838 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java @@ -0,0 +1,933 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hugegraph.pd.core; + +======== +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.StoreStatusListener; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; + +public class StoreServiceTest extends PDCoreTestBase { + + private PDConfig config; + + private StoreNodeService service; + + @Before + public void setUp() { + this.config = getPdConfig(); + this.service = new StoreNodeService(this.config); + } + + @Test + public void testInit() { + // Setup +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + PDConfig pdConfig = getPdConfig(); + final PartitionService partitionService = new PartitionService(pdConfig, + new StoreNodeService( + 
pdConfig)); +======== + PDConfig pdConfig = getConfig(); + final PDConfig pdConfig1 = getConfig(); + final PartitionService partitionService = new PartitionService(pdConfig, new StoreNodeService(pdConfig1), + new ConfigService(pdConfig1)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + + // Run the test + this.service.init(partitionService); + + // Verify the results + } + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java +======== + private PDConfig getConfig() { + PDConfig pdConfig = new PDConfig(); + pdConfig.setConfigService( + new ConfigService(BaseServerTest.getConfig())); + pdConfig.setIdService(new IdService(BaseServerTest.getConfig())); + pdConfig.setClusterId(0L); + pdConfig.setPatrolInterval(0L); + pdConfig.setDataPath("dataPath"); + pdConfig.setMinStoreCount(0); + pdConfig.setInitialStoreList("initialStoreList"); + pdConfig.setHost("host"); + pdConfig.setVerifyPath("verifyPath"); + pdConfig.setLicensePath("licensePath"); + PDConfig.Raft raft = new PDConfig().new Raft(); + raft.setEnable(false); + pdConfig.setRaft(raft); + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setShardCount(0); + pdConfig.setPartition(partition); + pdConfig.setInitialStoreMap(Map.ofEntries(Map.entry("value", "value"))); + return pdConfig; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + @Test + public void testIsOK() { + // Setup + // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + final boolean result = this.service.isOK(); +======== + final boolean result = service.isOK(DEFAULT_STORE_GROUP_ID); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + + // Verify the results + assertThat(result).isTrue(); + } 
+ + @Test + public void testRegister() throws Exception { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + + // Configure PDConfig.getInitialStoreMap(...). 
+ final Map stringStringMap = Map.ofEntries( + Map.entry("value", "value")); + + // Run the test + final Metapb.Store result = this.service.register(store); + } + + @Test + public void testGetStore() throws Exception { + // Setup + try { + Metapb.GraphStats stats = Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole( + Metapb.ShardRole.None) + .build(); + Metapb.StoreStats storeStats = Metapb.StoreStats.newBuilder() + .setStoreId(0L) + .setPartitionCount( + 0) + .addGraphStats( + stats) + .build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder() + .setId(0L) + .setAddress( + "address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion( + "version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp( + 0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat( + 0L) + .setStats( + storeStats) + .setDataVersion(0) + .setCores(0) + .setDataPath( + "dataPath") + .build(); + + // Run the test + final Metapb.Store result = this.service.getStore(0L); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testUpdateStore() throws Exception { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + final Metapb.Store expectedResult = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + 
Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + + // Configure PDConfig.getPartition(...). + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + + // Run the test + final Metapb.Store result = this.service.updateStore(store); + } + + @Test + public void testStoreTurnoff() throws Exception { + // Setup + try { + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + // Configure PDConfig.getPartition(...). + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setTotalCount(0); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); +======== + // Configure PDConfig.getPartition(...). 
+ final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + + // Run the test + this.service.storeTurnoff(store); + + // Verify the results + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testGetStores1() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = this.service.getStores(); + } + + @Test + public void testGetStores2() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = this.service.getStores("graphName"); + } + + @Test + public void 
testGetStoreStatus() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test + final List result = this.service.getStoreStatus(false); + + } + + @Test + public void testGetShardGroups() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.ShardGroup.newBuilder().setId(0).addShards( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()) + .setState(Metapb.PartitionState.PState_None) + .build()); + + // Run the test + final List result = this.service.getShardGroups(); + + } + + @Test + public void testGetShardGroup() throws Exception { + // Setup + final Metapb.ShardGroup expectedResult = Metapb.ShardGroup.newBuilder() + .setId(0) + .addShards( + Metapb.Shard + .newBuilder() + .setStoreId( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .setState( + Metapb.PartitionState.PState_None) + .build(); + + // Run the test + final Metapb.ShardGroup result = this.service.getShardGroup(0); + + // Verify the results + } + + @Test + public void testGetShardGroupsByStore() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.ShardGroup.newBuilder().setId(0).addShards( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()) + .setState(Metapb.PartitionState.PState_None) + .build()); + + // Run the test + final List result = 
this.service.getShardGroupsByStore( + 0L); + } + + @Test + public void testGetActiveStores1() throws Exception { + // Setup + final List expectedResult = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Unknown) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + + // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + final List result = this.service.getActiveStores("graphName"); +======== + final List result = service.getActiveStoresByStoreGroup(DEFAULT_STORE_GROUP_ID); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + + // Verify the results + } + + @Test + public void testGetActiveStores1ThrowsPDException() { + try { + List stores = this.service.getActiveStores(); + assertThat(stores.size()).isEqualTo(0); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Ignore // state is Pending instead of Tombstone + @Test + public void testGetTombStores() throws Exception { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + // Setup + final List storeList = List.of( + Metapb.Store.newBuilder().setId(0L).setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel.newBuilder().build()) + .setVersion("version") + .setState(Metapb.StoreState.Tombstone) + .setStartTimestamp(0L).setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + 
Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build()); + this.service.register(storeList.get(0)); + + // Run the test + final List result = this.service.getTombStores(); + + // Verify the results + assertThat(result.size()).isEqualTo(1); + this.service.removeStore(result.get(0).getId()); + List stores = this.service.getStores(); + assertThat(stores.size()).isEqualTo(0); +======== + //// Setup + //final List storeList = List.of( + // Metapb.Store.newBuilder().setId(0L).setAddress("address") + // .setRaftAddress("raftAddress") + // .addLabels(Metapb.StoreLabel.newBuilder().build()) + // .setVersion("version") + // .setState(Metapb.StoreState.Tombstone) + // .setStartTimestamp(0L).setDeployPath("deployPath") + // .setLastHeartbeat(0L).setStats( + // Metapb.StoreStats.newBuilder().setStoreId(0L) + // .setPartitionCount(0).addGraphStats( + // Metapb.GraphStats.newBuilder() + // .setGraphName("value") + // .setApproximateSize(0L) + // .setRole(Metapb.ShardRole.None) + // .build()).build()) + // .setDataVersion(0).setCores(0) + // .setDataPath("dataPath").build()); + //service.register(storeList.get(0)); + // + //// Run the test + //final List result = service.getTombStores(); + // + //// Verify the results + //assertThat(result.size() == 1); + //service.removeStore(result.get(0).getId()); + //List stores = service.getStores(); + //assertThat(stores.size() == 0); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + } + + @Test + public void testAllocShards() throws Exception { + // Setup + try { + final Metapb.Graph graph = Metapb.Graph.newBuilder() + .setGraphName("graphName") + .setGraphState( + Metapb.GraphState + .newBuilder() + .setMode( + 
Metapb.GraphMode.ReadWrite) + .setReason( + Metapb.GraphModeReason.Quota) + .build()) + .build(); + final List expectedResult = List.of( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()); + + // Configure PDConfig.getPartition(...). + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + + // Run the test + final List result = this.service.allocShards(graph, 0); + } catch (Exception e) { + e.printStackTrace(); + } + + } + + @Test + public void testReallocShards() throws Exception { + // Setup + try { + final Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(0) + .addShards( + Metapb.Shard + .newBuilder() + .setStoreId( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .setState( + Metapb.PartitionState.PState_None) + .build(); + final List expectedResult = List.of( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()); + + // Configure PDConfig.getPartition(...). 
+ final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + when(this.config.getPartition()).thenReturn(partition); + + // Run the test + final List result = this.service.reallocShards(shardGroup); + + // Verify the results + assertThat(result).isEqualTo(expectedResult); + } catch (Exception e) { + e.printStackTrace(); + } + + } + + @Test + public void testUpdateShardGroup() { + try { + final List shards = List.of( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()); + + // Run the test + this.service.updateShardGroup(0, shards, 0, 0); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testUpdateShardGroupState() throws Exception { + try { + this.service.updateShardGroupState(0, Metapb.PartitionState.PState_None); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testHeartBeat() throws Exception { + // Setup + try { + final Metapb.StoreStats storeStats = Metapb.StoreStats.newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .build(); + final Metapb.ClusterStats expectedResult = Metapb.ClusterStats + .newBuilder().setState(Metapb.ClusterState.Cluster_OK) + .setMessage("message").setTimestamp(0L).build(); + when(this.config.getMinStoreCount()).thenReturn(0); + + // Configure PDConfig.getPartition(...). 
+ final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); + when(this.config.getPartition()).thenReturn(partition); + + // Run the test + final Metapb.ClusterStats result = this.service.heartBeat(storeStats); + + // Verify the results + assertThat(result).isEqualTo(expectedResult); + } catch (Exception e) { + e.printStackTrace(); + } + } + + @Test + public void testUpdateClusterStatus1() throws PDException { + // Setup + final Metapb.ClusterStats expectedResult = Metapb.ClusterStats + .newBuilder().setState(Metapb.ClusterState.Cluster_OK) + .setMessage("message").setTimestamp(0L).build(); + + // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + final Metapb.ClusterStats result = this.service.updateClusterStatus( +======== + final Metapb.ClusterStats result = service.updateClusterStatus(DEFAULT_STORE_GROUP_ID, +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + Metapb.ClusterState.Cluster_OK); + } + + @Test + public void testUpdateClusterStatus2() throws PDException { + // Setup + final Metapb.ClusterStats expectedResult = Metapb.ClusterStats + .newBuilder().setState(Metapb.ClusterState.Cluster_OK) + .setMessage("message").setTimestamp(0L).build(); + + // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + final Metapb.ClusterStats result = this.service.updateClusterStatus( +======== + final Metapb.ClusterStats result = service.updateClusterStatus( DEFAULT_STORE_GROUP_ID, +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + Metapb.PartitionState.PState_None); + } + + @Test + public void testCheckStoreStatus() { + // Setup + // Run the test +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java + this.service.checkStoreStatus(); +======== + service.checkStoreStatus(DEFAULT_STORE_GROUP_ID); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java + + // Verify the results + } + + @Test + public void testAddStatusListener() { + // Setup + final StoreStatusListener mockListener = mock( + StoreStatusListener.class); + + // Run the test + this.service.addStatusListener(mockListener); + + // Verify the results + } + + @Test + public void testOnStoreStatusChanged() { + // Setup + final Metapb.Store store = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress("raftAddress") + .addLabels(Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version").setState( + Metapb.StoreState.Unknown).setStartTimestamp(0L) + .setDeployPath("deployPath") + .setLastHeartbeat(0L).setStats( + Metapb.StoreStats.newBuilder().setStoreId(0L) + .setPartitionCount(0).addGraphStats( + Metapb.GraphStats.newBuilder() + .setGraphName("value") + .setApproximateSize(0L) + .setRole(Metapb.ShardRole.None) + .build()).build()) + .setDataVersion(0).setCores(0) + .setDataPath("dataPath").build(); + + // Verify the results + } + + @Test + public void testOnShardGroupSplit() { + // Setup + final Metapb.ShardGroup shardGroup = Metapb.ShardGroup.newBuilder() + .setId(0) + .addShards( + Metapb.Shard + .newBuilder() + .setStoreId( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .setState( + Metapb.PartitionState.PState_None) + .build(); + final List newShardGroups = List.of( + Metapb.ShardGroup.newBuilder().setId(0).addShards( + Metapb.Shard.newBuilder().setStoreId(0L) + .setRole(Metapb.ShardRole.None).build()) + .setState(Metapb.PartitionState.PState_None) + .build()); + final Consumer mockTask = mock(Consumer.class); + + // Verify the results + } + + @Ignore // active stores are fewer than min store count 
in pd config + @Test + public void testCheckStoreCanOffline() { + // Setup + final Metapb.Store currentStore = Metapb.Store.newBuilder().setId(0L) + .setAddress("address") + .setRaftAddress( + "raftAddress") + .addLabels( + Metapb.StoreLabel + .newBuilder() + .build()) + .setVersion("version") + .setState( + Metapb.StoreState.Unknown) + .setStartTimestamp(0L) + .setDeployPath( + "deployPath") + .setLastHeartbeat(0L) + .setStats( + Metapb.StoreStats + .newBuilder() + .setStoreId( + 0L) + .setPartitionCount( + 0) + .addGraphStats( + Metapb.GraphStats + .newBuilder() + .setGraphName( + "value") + .setApproximateSize( + 0L) + .setRole( + Metapb.ShardRole.None) + .build()) + .build()) + .setDataVersion(0) + .setCores(0) + .setDataPath("dataPath") + .build(); + // Run the test + final boolean result = this.service.checkStoreCanOffline(currentStore); + + // Verify the results + assertThat(result).isTrue(); + } + + @Test + public void testShardGroupsDbCompaction() throws Exception { + // Setup + // Run the test + try { + this.service.shardGroupsDbCompaction(0, "tableName"); + } catch (Exception e) { + e.printStackTrace(); + } + + // Verify the results + } + + @Test + public void testGetQuota() throws Exception { + // Setup + // Run the test + try { + this.service.getQuota(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + // migrated from StoreNodeServiceNewTest + @Test + public void testRemoveShardGroup() throws PDException { + for (int i = 0; i < 12; i++) { + Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() + .setId(i) + .setState( + Metapb.PartitionState.PState_Offline) + .build(); + this.service.getStoreInfoMeta().updateShardGroup(group); + } + + this.service.deleteShardGroup(11); + this.service.deleteShardGroup(10); + + assertEquals(10, getPdConfig().getConfigService().getPDConfig().getPartitionCount()); + // restore + getPdConfig().getConfigService().setPartitionCount(12); + } +} diff --git 
a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java new file mode 100644 index 0000000000..2d71595f32 --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/TaskScheduleServiceTest.java @@ -0,0 +1,105 @@ +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + + +public class TaskScheduleServiceTest extends PdTestBase{ + + TaskScheduleService service; + + @Before + public void init() throws PDException { + service = getTaskService(); + + var partitionService = getPartitionService(); + var graph = partitionService.getGraph("graph0"); + if (graph == null) { + partitionService.createGraph("graph0", 0, 0); + } + + var storeNodeService = getStoreNodeService(); + storeNodeService.updateStoreGroupRelation(1, 0); + storeNodeService.updateStoreGroupRelation(2, 0); + storeNodeService.updateStoreGroupRelation(3, 0); + } + + @Test + public void testStoreOffline(){ + + } + + public void testPatrolStores(){ + + } + + public void testPatrolPartitions(){ + + } + + public void testBalancePartitionShard(){ + + } + + @Test + public void testBalancePartitionLeader() throws PDException { + + var list = new ArrayList(); + for (int i = 0; i < 6; i++){ + getStoreNodeService().getStoreInfoMeta().updateShardGroup(genShardGroup(i)); + list.add(genPartition(i)); + } + + getPdConfig().getPartition().setShardCount(3); + + getPartitionService().updatePartition(list); + var rst = service.balancePartitionLeader(true); + // assertTrue(rst.size() > 0 ); + // recover + getPdConfig().getPartition().setShardCount(1); + getStoreNodeService().getStoreInfoMeta().removeAll(); + } + + public void testSplitPartition(){ + + } 
+ public void testSplitPartition2(){ + + } + + public void testCanAllPartitionsMovedOut(){ + + } + + private Metapb.ShardGroup genShardGroup(int groupId){ + return Metapb.ShardGroup.newBuilder() + .setId(groupId) + .addAllShards(genShards()) + .build(); + } + + private Metapb.Partition genPartition(int groupId){ + return Metapb.Partition.newBuilder() + .setId(groupId) + .setState(Metapb.PartitionState.PState_Normal) + .setGraphName("graph0") + .setStartKey(groupId * 10) + .setEndKey(groupId * 10 + 10) + .build(); + } + + private List genShards(){ + return List.of(Metapb.Shard.newBuilder().setStoreId(1).setRole(Metapb.ShardRole.Leader).build(), + Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Follower).build(), + Metapb.Shard.newBuilder().setStoreId(3).setRole(Metapb.ShardRole.Follower).build()); + } + +} + + diff --git a/hg-pd-test/src/main/java/org/apache/hugegraph/pd/test/HgPDTestUtil.java b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/test/HgPDTestUtil.java new file mode 100644 index 0000000000..de8532e7fc --- /dev/null +++ b/hg-pd-test/src/main/java/org/apache/hugegraph/pd/test/HgPDTestUtil.java @@ -0,0 +1,78 @@ +package org.apache.hugegraph.pd.test; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.List; + +/** + * @author lynn.bond@hotmail.com created on 2021/11/8 + */ +public class HgPDTestUtil { + + public static void println(Object str) { + System.out.println(str); + } + + public static String toStr(byte[] b) { + if (b == null) return ""; + if (b.length == 0) return ""; + return new String(b, StandardCharsets.UTF_8); + } + + public static byte[] toBytes(String str) { + if (str == null) return null; + return str.getBytes(StandardCharsets.UTF_8); + } + + public static byte[] toBytes(long l) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(l); + return buffer.array(); + } + + private static byte[] toBytes(final int i) { + ByteBuffer buffer = 
ByteBuffer.allocate(Integer.BYTES); + buffer.putInt(i); + return buffer.array(); + } + + public static long toLong(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getLong(); + } + + public static long toInt(byte[] bytes) { + ByteBuffer buffer = ByteBuffer.allocate(Integer.BYTES); + buffer.put(bytes); + buffer.flip();//need flip + return buffer.getInt(); + } + + public static String padLeftZeros(String str, int n) { + return String.format("%1$" + n + "s", str).replace(' ', '0'); + } + + public static String toSuffix(int num, int length) { + return "-" + padLeftZeros(String.valueOf(num), length); + } + + public static int amountOf(List list) { + if (list == null) { + return 0; + } + return list.size(); + } + + public static int amountOf(Iterator iterator) { + if (iterator == null) return 0; + int count = 0; + while (iterator.hasNext()) { + iterator.next(); + count++; + } + return count; + } +} diff --git a/hugegraph-pd/hg-pd-client/pom.xml b/hugegraph-pd/hg-pd-client/pom.xml index 2eaab8ac0a..e92bc6ace5 100644 --- a/hugegraph-pd/hg-pd-client/pom.xml +++ b/hugegraph-pd/hg-pd-client/pom.xml @@ -53,7 +53,7 @@ junit junit - 4.13.2 + ${junit.version} test @@ -61,6 +61,11 @@ commons-io 2.8.0 + + org.apache.commons + commons-lang3 + ${commons-lang3.version} + org.yaml snakeyaml @@ -68,4 +73,21 @@ test + + + + org.apache.maven.plugins + maven-source-plugin + + + attach-sources + + jar + + + + + + + diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java index 4f76d5ac9b..bdfa4ebf7e 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java @@ -24,20 +24,25 @@ import 
org.apache.hugegraph.pd.grpc.discovery.NodeInfo; import org.apache.hugegraph.pd.grpc.discovery.RegisterType; -@Useless("discovery related") +/** + * @author zhangyingjie + * @date 2021/12/20 + **/ public class DiscoveryClientImpl extends DiscoveryClient { - private final String id; - private final RegisterType type; - private final String version; - private final String appName; - private final int times; - private final String address; - private final Map labels; - private final Consumer registerConsumer; + private volatile String id ; + private RegisterType type; // 心跳类型,备用 + private String version; + private String appName; + private int times; // 心跳过期次数,备用 + private String address; + private Map labels; + private Consumer registerConsumer; + private PDConfig conf; + private DiscoveryClientImpl(Builder builder) { - super(builder.centerAddress, builder.delay); + super(builder.delay, builder.conf); period = builder.delay; id = builder.id; type = builder.type; @@ -66,6 +71,11 @@ Consumer getRegisterConsumer() { return registerConsumer; } + @Override + public void onLeaderChanged(String leaderAddress) { + + } + public static final class Builder { private int delay; @@ -78,6 +88,7 @@ public static final class Builder { private String appName; private int times; private Consumer registerConsumer; + private PDConfig conf; private Builder() { } @@ -127,6 +138,11 @@ public Builder setTimes(int val) { return this; } + public Builder setPdConfig(PDConfig val) { + this.conf = val; + return this; + } + public Builder setRegisterConsumer(Consumer registerConsumer) { this.registerConsumer = registerConsumer; return this; diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java index 268ccb68dd..710e985604 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java +++ 
b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -26,24 +27,25 @@ import io.grpc.stub.AbstractBlockingStub; import io.grpc.stub.AbstractStub; +======== +package org.apache.hugegraph.pd.client; + +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import com.google.protobuf.ByteString; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java import lombok.extern.slf4j.Slf4j; @Useless("license related") @Slf4j -public class LicenseClient extends AbstractClient { +public class LicenseClient extends BaseClient { public LicenseClient(PDConfig config) { - super(config); - } - - @Override - protected AbstractStub createStub() { - return PDGrpc.newStub(channel); - } - - @Override - protected AbstractBlockingStub createBlockingStub() { - return PDGrpc.newBlockingStub(channel); + super(config, PDGrpc::newStub, PDGrpc::newBlockingStub); } public Pdpb.PutLicenseResponse putLicense(byte[] content) { @@ -54,10 +56,10 @@ public Pdpb.PutLicenseResponse putLicense(byte[] content) { try { KVPair pair = concurrentBlockingUnaryCall( PDGrpc.getPutLicenseMethod(), request, - (rs) -> rs.getHeader().getError().getType().equals(Pdpb.ErrorType.OK)); + (rs) -> rs.getHeader().getError().getType().equals(ErrorType.OK)); if (pair.getKey()) { Pdpb.PutLicenseResponse.Builder builder = Pdpb.PutLicenseResponse.newBuilder(); - builder.setHeader(okHeader); + builder.setHeader(OK_HEADER); return builder.build(); } else { return 
pair.getValue(); @@ -65,9 +67,16 @@ public Pdpb.PutLicenseResponse putLicense(byte[] content) { } catch (Exception e) { e.printStackTrace(); log.debug("put license with error:{} ", e); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java Pdpb.ResponseHeader rh = newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); +======== + ResponseHeader rh = createErrorHeader(ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build(); } } + + public void onLeaderChanged(String leader) { + } } diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java index 200a35ee87..ae1570725e 100644 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java +++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -309,21 +310,95 @@ private String getLeaderIp(PDGrpc.PDBlockingStub stub) { /** * Store registration, the store ID will be returned, and the initial registration will * return a new ID +======== +package org.apache.hugegraph.pd.client.impl; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hugegraph.pd.client.ClientCache; +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.client.rpc.ConnectionManager; +import org.apache.hugegraph.pd.client.rpc.Invoker; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.StoreGroup; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.RequestHeader; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import com.google.protobuf.ByteString; + +import io.grpc.MethodDescriptor; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2023/12/8 + */ +@Slf4j +public class PDApi { + private final PDConfig config; + private final ConnectionManager cm; + private final ClientCache cache; + private final RequestHeader header = RequestHeader.getDefaultInstance(); + private final Invoker invoker; + private PDClient client; + + public PDApi(PDClient client, ClientCache cache) { + this.client = client; + this.config = client.getConfig(); + this.cm = client.getCm(); + this.cache = cache; + this.invoker = client.getLeaderInvoker(); + } + + private RespT blockingUnaryCall( + MethodDescriptor method, ReqT req) throws 
PDException { + return invoker.blockingCall(method, req); + } + + private void handleResponseError(ResponseHeader header) throws PDException { + var errorType = header.getError().getType(); + if (header.hasError() && errorType != ErrorType.OK) { + throw new PDException(header.getError().getTypeValue(), + String.format("PD request error, error code = %d, msg = %s", + header.getError().getTypeValue(), + header.getError().getMessage())); + } + } + + /** + * Store注册,返回storeID,初次注册会返回新ID +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @param store * @return */ public long registerStore(Metapb.Store store) throws PDException { Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .setStore(store).build(); +======== + .setHeader(header) + .setStore(store).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java Pdpb.RegisterStoreResponse response = blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request); handleResponseError(response.getHeader()); return response.getStoreId(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java /** * Returns the Store object based on the store ID * @@ -443,11 +518,23 @@ private KVPair getKvPair(String graphName, byte[ .setKey(ByteString.copyFrom(key)) .build(); GetPartitionResponse response = +======== + public KVPair getKvPair(String graphName, byte[] key, KVPair partShard) throws PDException { + if (partShard == null) { + Pdpb.GetPartitionRequest request = Pdpb.GetPartitionRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setKey(ByteString.copyFrom(key)) + .build(); + Pdpb.GetPartitionResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request); handleResponseError(response.getHeader()); partShard = new KVPair<>(response.getPartition(), response.getLeader()); cache.update(graphName, partShard.getKey().getId(), partShard.getKey()); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java return partShard; } @@ -474,11 +561,18 @@ public KVPair getPartition(String graphName, byt KVPair partShard = cache.getPartitionByCode(graphName, code); partShard = getKvPair(graphName, key, partShard); +======== + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java return partShard; } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * Query the partition information based on the hashcode +======== + * 根据hashcode查询所属分区信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @param graphName * @param hashCode @@ -488,6 +582,7 @@ public KVPair getPartition(String graphName, byt public KVPair getPartitionByCode(String graphName, long hashCode) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java KVPair partShard = cache.getPartitionByCode(graphName, hashCode); if (partShard == null) { @@ -496,6 +591,16 @@ public KVPair getPartitionByCode(String graphNam .setGraphName(graphName) .setCode(hashCode).build(); GetPartitionResponse response = +======== + // 先查cache,cache没有命中,在调用PD + KVPair partShard = cache.getPartitionByCode(graphName, hashCode); + if (partShard == null) { + Pdpb.GetPartitionByCodeRequest request = Pdpb.GetPartitionByCodeRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setCode(hashCode).build(); + Pdpb.GetPartitionResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request); handleResponseError(response.getHeader()); partShard = new KVPair<>(response.getPartition(), response.getLeader()); @@ -504,7 +609,11 @@ public KVPair getPartitionByCode(String graphNam } if (partShard.getValue() == null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java ShardGroup shardGroup = getShardGroup(partShard.getKey().getId()); +======== + Metapb.ShardGroup shardGroup = getShardGroup(partShard.getKey().getId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java if (shardGroup != null) { for (var shard : shardGroup.getShardsList()) { if (shard.getRole() == Metapb.ShardRole.Leader) { @@ -512,13 +621,18 @@ public KVPair getPartitionByCode(String graphNam } } } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java log.error("getPartitionByCode: get shard group failed, {}", partShard.getKey().getId()); +======== + log.error("getPartitionByCode: get shard group failed, {}", partShard.getKey().getId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java } } return partShard; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java /** * Obtain the hash value of the key */ @@ -528,6 +642,10 @@ public int keyToCode(String graphName, byte[] key) { /** * Returns partition information based on the partition ID and RPC request +======== + /** + * 根据分区id返回分区信息, RPC请求 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @param graphName * @param partId @@ -540,6 +658,7 @@ public KVPair getPartitionById(String graphName, cache.getPartitionById(graphName, partId); if (partShard == null) { 
Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .setGraphName( graphName) @@ -547,6 +666,13 @@ public KVPair getPartitionById(String graphName, partId) .build(); GetPartitionResponse response = +======== + .setHeader(header) + .setGraphName(graphName) + .setPartitionId(partId) + .build(); + Pdpb.GetPartitionResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request); handleResponseError(response.getHeader()); partShard = new KVPair<>(response.getPartition(), response.getLeader()); @@ -564,13 +690,18 @@ public KVPair getPartitionById(String graphName, } } } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java log.error("getPartitionById: get shard group failed, {}", partShard.getKey().getId()); +======== + log.error("getPartitionById: get shard group failed, {}", partShard.getKey().getId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java } } return partShard; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public ShardGroup getShardGroup(int partId) throws PDException { ShardGroup group = cache.getShardGroup(partId); if (group == null) { @@ -587,14 +718,31 @@ public ShardGroup getShardGroup(int partId) throws PDException { } } return group; +======== + + public Metapb.ShardGroup getShardGroupDirect(int partId) throws PDException { + Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(partId) + .build(); + Pdpb.GetShardGroupResponse response = blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request); + handleResponseError(response.getHeader()); + 
return response.getShardGroup(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java } public void updateShardGroup(ShardGroup shardGroup) throws PDException { Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .setShardGroup( shardGroup) .build(); +======== + .setHeader(header) + .setShardGroup(shardGroup) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java Pdpb.UpdateShardGroupResponse response = blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request); handleResponseError(response.getHeader()); @@ -605,6 +753,7 @@ public void updateShardGroup(ShardGroup shardGroup) throws PDException { } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * Returns information about all partitions spanned by the start and end keys * * @param graphName @@ -635,6 +784,9 @@ public List> scanPartitions(String graphN /** * Query partition information based on conditions +======== + * 根据条件查询分区信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @return * @throws PDException @@ -653,8 +805,18 @@ public List getPartitionsByStore(long storeId) throws PDExcept return response.getPartitionsList(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public List queryPartitions(long storeId, int partitionId) throws PDException { +======== + /** + * 查找指定store上的指定partitionId + * + * @return + * @throws PDException + */ + public List queryPartitions(long storeId, int partitionId) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java Metapb.PartitionQuery query = 
Metapb.PartitionQuery.newBuilder() .setStoreId(storeId) @@ -672,10 +834,10 @@ public List queryPartitions(long storeId, int partitionId) thr public List getPartitions(long storeId, String graphName) throws PDException { Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder() - .setStoreId(storeId) - .setGraphName(graphName).build(); + .setStoreId(storeId) + .setGraphName(graphName).build(); Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder() - .setQuery(query).build(); + .setQuery(query).build(); Pdpb.QueryPartitionsResponse response = blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request); @@ -684,11 +846,28 @@ public List getPartitions(long storeId, String graphName) thro } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder() .setGraph(graph) .build(); Pdpb.SetGraphResponse response = +======== + public Metapb.Graph createGraph(Metapb.Graph graph) throws PDException { + Pdpb.CreateGraphRequest request = Pdpb.CreateGraphRequest.newBuilder() + .setGraph(graph) + .build(); + Pdpb.CreateGraphResponse response = blockingUnaryCall(PDGrpc.getCreateGraphMethod(), request); + handleResponseError(response.getHeader()); + return response.getGraph(); + } + + public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException { + Pdpb.CreateGraphRequest request = Pdpb.CreateGraphRequest.newBuilder() + .setGraph(graph) + .build(); + Pdpb.CreateGraphResponse response = +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java blockingUnaryCall(PDGrpc.getSetGraphMethod(), request); handleResponseError(response.getHeader()); @@ -707,11 +886,19 @@ public Metapb.Graph getGraph(String graphName) throws PDException { } public Metapb.Graph getGraphWithOutException(String graphName) throws +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java PDException { GetGraphRequest request = GetGraphRequest.newBuilder() .setGraphName( graphName) .build(); +======== + PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder() + .setGraphName( + graphName) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java Pdpb.GetGraphResponse response = blockingUnaryCall( PDGrpc.getGetGraphMethod(), request); return response.getGraph(); @@ -756,6 +943,7 @@ public Metapb.Partition delPartition(String graphName, int partitionId) throws P invalidPartitionCache(graphName, partitionId); return response.getPartition(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java /** * Delete the partitioned cache @@ -828,6 +1016,8 @@ public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader cache.updateLeader(partition.getId(), leader); } } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException { Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder() @@ -842,9 +1032,15 @@ public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException { public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException { Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .setKey(key) .build(); +======== + .setHeader(header) + .setKey(key) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request); handleResponseError(response.getHeader()); return response; @@ 
-861,23 +1057,31 @@ public Metapb.Member getLeader() throws PDException { public Pdpb.GetMembersResponse getMembers() throws PDException { Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder() - .setHeader(header) - .build(); + .setHeader(header) + .build(); Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request); handleResponseError(response.getHeader()); return response; } - public Metapb.ClusterStats getClusterStats() throws PDException { + public Metapb.ClusterStats getClusterStats(long storeId) throws PDException { Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .build(); Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); +======== + .setHeader(header) + .setStoreId(storeId) + .build(); + Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); return response.getCluster(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java private > RespT blockingUnaryCall(MethodDescriptor method, ReqT req) throws PDException { return blockingUnaryCall(method, req, 1); @@ -898,11 +1102,59 @@ public Metapb.ClusterStats getClusterStats() throws PDException { closeStub(true); return blockingUnaryCall(method, req, ++retry); } +======== + public Metapb.ClusterStats getClusterStats(int storeGroupId) throws PDException { + Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder() + .setHeader(header) + .setStoreGroup(storeGroupId) + .build(); + Pdpb.GetClusterStatsResponse response = blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request); + 
handleResponseError(response.getHeader()); + return response.getCluster(); + } + + public void changePeerList(String peerList) throws PDException { + ClusterOp.ChangePeerListRequest request = ClusterOp.ChangePeerListRequest.newBuilder() + .setPeerList(peerList) + .setHeader(header).build(); + ClusterOp.ChangePeerListResponse response = + blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request); + handleResponseError(response.getHeader()); + } + + public void reportTask(MetaTask.Task task) throws PDException { + ClusterOp.ReportTaskRequest request = ClusterOp.ReportTaskRequest.newBuilder() + .setHeader(header) + .setTask(task).build(); + ClusterOp.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request); + handleResponseError(response.getHeader()); + } + + public void deleteShardGroup(int groupId) throws PDException { + Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest + .newBuilder() + .setHeader(header) + .setGroupId(groupId) + .build(); + Pdpb.DeleteShardGroupResponse response = + blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request); + + handleResponseError(response.getHeader()); + } + + public Metapb.ShardGroup getShardGroup(int partId) throws PDException { + Metapb.ShardGroup group = cache.getShardGroup(partId); + if (group == null) { + group = getShardGroupDirect(partId); + if (config.isEnableCache()) { + cache.updateShardGroup(group); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java } } - return null; + return group; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java private void handleResponseError(Pdpb.ResponseHeader header) throws PDException { var errorType = header.getError().getType(); @@ -913,9 +1165,25 @@ private void handleResponseError(Pdpb.ResponseHeader header) throws "PD request error, error code = %d, msg = %s", header.getError().getTypeValue(), 
header.getError().getMessage())); +======== + public void invalidPartitionCache() { + // 检查是否存在缓存 + cache.removePartitions(); + } + + /** + * 删除分区缓存 + */ + public void invalidPartitionCache(String graphName, int partitionId) { + // 检查是否存在缓存 + if (null != cache.getPartitionById(graphName, partitionId)) { + cache.removePartition(graphName, partitionId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java } + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public void addEventListener(PDEventListener listener) { eventListeners.add(listener); } @@ -934,6 +1202,131 @@ public List getStoreStatus(boolean offlineExcluded) throws PDExcep offlineExcluded) .build(); Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request); +======== + /** + * 根据storeId返回Store对象 + * + * @param storeId + * @return + * @throws PDException + */ + public Metapb.Store getStore(long storeId) throws PDException { + Metapb.Store store = cache.getStoreById(storeId); + if (store == null) { + Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder() + .setHeader(header) + .setStoreId(storeId).build(); + // Pdpb.GetStoreResponse response = getStub().getStore(request); + Pdpb.GetStoreResponse response = blockingUnaryCall(PDGrpc.getGetStoreMethod(), request); + handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) { + cache.addStore(storeId, store); + } + } + return store; + } + + /** + * 更新Store信息,包括上下线等 + * + * @param store + * @return + */ + public Metapb.Store updateStore(Metapb.Store store) throws PDException { + Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder() + .setHeader(header) + .setStore(store).build(); + + // Pdpb.SetStoreResponse response = getStub().setStore(request); + Pdpb.SetStoreResponse response = blockingUnaryCall(PDGrpc.getSetStoreMethod(), request); + 
handleResponseError(response.getHeader()); + store = response.getStore(); + if (config.isEnableCache()) { + cache.addStore(store.getId(), store); + } + return store; + } + + public List getActiveStores() throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName("") + .setExcludeOfflineStores(true) + .build(); + // Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetAllStoresMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * 返回活跃的Store + * + * @param graphName + * @return + */ + public List getActiveStores(String graphName) throws PDException { + Set stores = new HashSet<>(); + KVPair ptShard = this.getPartitionByCode(graphName, 0); + while (ptShard != null) { + stores.add(this.getStore(ptShard.getValue().getStoreId())); + if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) { + ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey()); + } else { + ptShard = null; + } + } + return new ArrayList<>(stores); + } + + /** + * 返回活跃的Store + * + * @param graphName + * @return + */ + public List getAllStores(String graphName) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setExcludeOfflineStores(false) + .build(); + // Pdpb.GetAllStoresResponse response = getStub().getAllStores(request); + Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetAllStoresMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + + } + + /** + * Store心跳,定期调用,保持在线状态 + * + * @param stats + * @throws PDException + */ + public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException { + Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder() + 
.setHeader(header) + .setStats(stats).build(); + // Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request); + Pdpb.StoreHeartbeatResponse response = blockingUnaryCall(PDGrpc.getStoreHeartbeatMethod(), request); + handleResponseError(response.getHeader()); + return response.getClusterStats(); + } + + /** + * 返回Store状态信息 + */ + public List getStoreStatus(boolean offlineExcluded) throws PDException { + Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder() + .setHeader(header) + .setExcludeOfflineStores(offlineExcluded) + .build(); + // Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request); + Pdpb.GetAllStoresResponse response = blockingUnaryCall(PDGrpc.getGetStoreStatusMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); List stores = response.getStoresList(); return stores; @@ -941,19 +1334,25 @@ public List getStoreStatus(boolean offlineExcluded) throws PDExcep public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException { Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder().setName(graphSpaceName) +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setStorageLimit(storageLimit) .setTimestamp(System.currentTimeMillis()) .build(); +======== + .setStorageLimit(storageLimit) + .setTimestamp(System.currentTimeMillis()).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java Pdpb.SetGraphSpaceRequest request = Pdpb.SetGraphSpaceRequest.newBuilder() - .setHeader(header) - .setGraphSpace(graphSpace) - .build(); - Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request); + .setHeader(header) + .setGraphSpace(graphSpace) + .build(); + // Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request); + Pdpb.SetGraphSpaceResponse 
response = blockingUnaryCall(PDGrpc.getSetGraphSpaceMethod(), request); handleResponseError(response.getHeader()); } public List getGraphSpace(String graphSpaceName) throws - PDException { + PDException { Pdpb.GetGraphSpaceRequest.Builder builder = Pdpb.GetGraphSpaceRequest.newBuilder(); Pdpb.GetGraphSpaceRequest request; builder.setHeader(header); @@ -961,12 +1360,14 @@ public List getGraphSpace(String graphSpaceName) throws builder.setGraphSpaceName(graphSpaceName); } request = builder.build(); - Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request); + // Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request); + Pdpb.GetGraphSpaceResponse response = blockingUnaryCall(PDGrpc.getGetGraphSpaceMethod(), request); List graphSpaceList = response.getGraphSpaceList(); handleResponseError(response.getHeader()); return graphSpaceList; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws PDException { Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().setPartitionCount(partitionCount) @@ -979,14 +1380,47 @@ public void setPDConfig(int partitionCount, String peerList, int shardCount, .setPdConfig(pdConfig) .build(); Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); +======== + public void setPDConfig(int partitionCount, String peerList, int shardCount, long version) throws + PDException { + Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder() + .setPeersList(peerList) + .setShardCount(shardCount) + .setVersion(version) + .setTimestamp(System.currentTimeMillis()) + .build(); + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + // Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + Pdpb.SetPDConfigResponse response = blockingUnaryCall(PDGrpc.getSetPDConfigMethod(), request); + 
handleResponseError(response.getHeader()); + } + + public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { + Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder() + .setHeader(header) + .setPdConfig(pdConfig) + .build(); + // Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request); + Pdpb.SetPDConfigResponse response = blockingUnaryCall(PDGrpc.getSetPDConfigMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } public Metapb.PDConfig getPDConfig() throws PDException { Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .build(); Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); +======== + .setHeader(header) + .build(); + // Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + Pdpb.GetPDConfigResponse response = blockingUnaryCall(PDGrpc.getGetPDConfigMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); return response.getPdConfig(); } @@ -1003,11 +1437,13 @@ public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException { public Metapb.PDConfig getPDConfig(long version) throws PDException { Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder().setHeader( header).setVersion(version).build(); - Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + // Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request); + Pdpb.GetPDConfigResponse response = blockingUnaryCall(PDGrpc.getGetPDConfigMethod(), request); handleResponseError(response.getHeader()); return response.getPdConfig(); } +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public void changePeerList(String peerList) throws PDException { Pdpb.ChangePeerListRequest request = Pdpb.ChangePeerListRequest.newBuilder() .setPeerList(peerList) @@ -1054,20 +1490,43 @@ public void splitData(Pdpb.OperationMode mode, List params) .setMode(mode) .addAllParam(params).build(); Pdpb.SplitDataResponse response = getStub().splitData(request); +======== + public void splitData(ClusterOp.OperationMode mode, int storeGroupId, List params) + throws PDException { + ClusterOp.SplitDataRequest request = ClusterOp.SplitDataRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .setStoreGroupId(storeGroupId) + .addAllParam(params).build(); + ; + ClusterOp.SplitDataResponse response = blockingUnaryCall(PDGrpc.getSplitDataMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } + public void splitGraphData(String graphName, int toCount) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.SplitGraphDataRequest request = Pdpb.SplitGraphDataRequest.newBuilder() .setHeader(header) .setGraphName(graphName) .setToCount(toCount) .build(); Pdpb.SplitDataResponse response = getStub().splitGraphData(request); +======== + ClusterOp.SplitGraphDataRequest request = ClusterOp.SplitGraphDataRequest.newBuilder() + .setHeader(header) + .setGraphName(graphName) + .setToCount(toCount) + .build(); + // Pdpb.SplitDataResponse response = getStub().splitGraphData(request); + ClusterOp.SplitDataResponse response = blockingUnaryCall(PDGrpc.getSplitGraphDataMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } /** +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * To automatically transfer to the same number of partitions on each Store, it is * recommended to use balancePartition(int storeGroupId) to specify the storeGroupId * @@ -1108,16 +1567,41 @@ public void reportTask(MetaTask.Task task) throws PDException { .setHeader(header) .setTask(task).build(); Pdpb.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request); +======== + * 平衡分区 + * @param mode auto or expert + * @param storeGroupId for auto + * @param params for expert + * @throws PDException errors occurs + */ + public void balancePartition(ClusterOp.OperationMode mode, int storeGroupId, + List params) throws PDException { + ClusterOp.MovePartitionRequest request = ClusterOp.MovePartitionRequest.newBuilder() + .setHeader(header) + .setMode(mode) + .setStoreGroupId(storeGroupId) + .addAllParam(params) + .build(); + ClusterOp.MovePartitionResponse response = blockingUnaryCall(PDGrpc.getMovePartitionMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException { Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .setGraphName(graph) .setPartitionId(partId) .build(); Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request); +======== + .setHeader(header) + .setGraphName(graph) + .setPartitionId(partId).build(); + // Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request); + Pdpb.GetPartitionStatsResponse response = blockingUnaryCall(PDGrpc.getGetPartitionStatsMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); return response.getPartitionStats(); } @@ -1126,10 +1610,18 @@ public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws * Balance the number of leaders in different stores */ public void balanceLeaders() throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.BalanceLeadersRequest request = Pdpb.BalanceLeadersRequest.newBuilder() .setHeader(header) .build(); Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request); +======== + ClusterOp.BalanceLeadersRequest request = ClusterOp.BalanceLeadersRequest.newBuilder() + .setHeader(header) + .build(); + // Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request); + ClusterOp.BalanceLeadersResponse response = blockingUnaryCall(PDGrpc.getBalanceLeadersMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } @@ -1138,41 +1630,59 @@ public void balanceLeaders() throws PDException { */ public Metapb.Store delStore(long storeId) throws PDException { Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java .setHeader(header) .setStoreId(storeId) .build(); Pdpb.DetStoreResponse response = getStub().delStore(request); +======== + .setHeader(header) + .setStoreId(storeId) + .build(); + // Pdpb.DetStoreResponse response = getStub().delStore(request); + Pdpb.DetStoreResponse response = blockingUnaryCall(PDGrpc.getDelStoreMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); return response.getStore(); } /** +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * Compaction on rocksdb as a whole +======== + * 对rocksdb整体进行compaction +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @throws PDException */ public void dbCompaction() throws PDException { - Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest + ClusterOp.DbCompactionRequest request = ClusterOp.DbCompactionRequest .newBuilder() .setHeader(header) .build(); - Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + // Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + ClusterOp.DbCompactionResponse response = blockingUnaryCall(PDGrpc.getDbCompactionMethod(), request); handleResponseError(response.getHeader()); } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * Compaction on rocksdb specified tables +======== + * 对rocksdb指定表进行compaction +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @param tableName * @throws PDException */ public void dbCompaction(String tableName) throws PDException { - Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest + ClusterOp.DbCompactionRequest request = ClusterOp.DbCompactionRequest .newBuilder() .setHeader(header) .setTableName(tableName) .build(); - Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + // Pdpb.DbCompactionResponse response = getStub().dbCompaction(request); + ClusterOp.DbCompactionResponse response = blockingUnaryCall(PDGrpc.getDbCompactionMethod(), request); handleResponseError(response.getHeader()); } @@ -1182,32 +1692,47 @@ public void dbCompaction(String tableName) throws PDException { * @param toCount The number of partitions that can be scaled down * @throws PDException */ +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java public void combineCluster(int toCount) throws PDException { Pdpb.CombineClusterRequest request = Pdpb.CombineClusterRequest +======== + public void combineCluster(int shardGroupId, int toCount) throws PDException { + ClusterOp.CombineClusterRequest request = ClusterOp.CombineClusterRequest +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java .newBuilder() .setHeader(header) + .setStoreGroupId(shardGroupId) .setToCount(toCount) .build(); - Pdpb.CombineClusterResponse response = getStub().combineCluster(request); + ClusterOp.CombineClusterResponse response = blockingUnaryCall(PDGrpc.getCombineClusterMethod(), request); handleResponseError(response.getHeader()); } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * Scaling a single image to toCount is similar to splitting to ensure that the number of * partitions in the same store group is the same. 
* If you have special requirements, you can consider migrating to other groups +======== + * 将单图缩容到 toCount个 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @param graphName graph name * @param toCount target count * @throws PDException */ public void combineGraph(String graphName, int toCount) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.CombineGraphRequest request = Pdpb.CombineGraphRequest +======== + ClusterOp.CombineGraphRequest request = ClusterOp.CombineGraphRequest +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java .newBuilder() .setHeader(header) .setGraphName(graphName) .setToCount(toCount) .build(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.CombineGraphResponse response = getStub().combineGraph(request); handleResponseError(response.getHeader()); } @@ -1221,22 +1746,40 @@ public void deleteShardGroup(int groupId) throws PDException { Pdpb.DeleteShardGroupResponse response = blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request); +======== + // Pdpb.CombineGraphResponse response = getStub().combineGraph(request); + ClusterOp.CombineGraphResponse response = blockingUnaryCall(PDGrpc.getCombineGraphMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java * Used for the store's shard list rebuild +======== + * 用于 store的 shard list重建 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java * * @param groupId shard group id * @param shards shard list, delete when shards size is 0 */ public void 
updateShardGroupOp(int groupId, List shards) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() .setHeader(header) .setGroupId(groupId) .addAllShards(shards) .build(); Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request); +======== + ClusterOp.ChangeShardRequest request = ClusterOp.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + // Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request); + ClusterOp.ChangeShardResponse response = blockingUnaryCall(PDGrpc.getUpdateShardGroupOpMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(response.getHeader()); } @@ -1247,6 +1790,7 @@ public void updateShardGroupOp(int groupId, List shards) throws PD * @param shards shard list */ public void changeShard(int groupId, List shards) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder() .setHeader(header) .setGroupId(groupId) @@ -1271,11 +1815,36 @@ public CachePartitionResponse getPartitionCache(String graph) throws PDException GetGraphRequest request = GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build(); CachePartitionResponse ps = getStub().getPartitions(request); +======== + ClusterOp.ChangeShardRequest request = ClusterOp.ChangeShardRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .addAllShards(shards) + .build(); + // Pdpb.ChangeShardResponse response = getStub().changeShard(request); + ClusterOp.ChangeShardResponse response = blockingUnaryCall(PDGrpc.getChangeShardMethod(), request); + handleResponseError(response.getHeader()); + } + + public 
Pdpb.CacheResponse getClientCache() throws PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(header).build(); + // Pdpb.CacheResponse cache = getStub().getCache(request); + Pdpb.CacheResponse cache = blockingUnaryCall(PDGrpc.getGetCacheMethod(), request); + handleResponseError(cache.getHeader()); + return cache; + } + + public Pdpb.CachePartitionResponse getPartitionCache(String graph) throws PDException { + Pdpb.GetGraphRequest request = Pdpb.GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build(); + // Pdpb.CachePartitionResponse ps = getStub().getPartitions(request); + Pdpb.CachePartitionResponse ps = blockingUnaryCall(PDGrpc.getGetPartitionsMethod(), request); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java handleResponseError(ps.getHeader()); return ps; } public void updatePdRaft(String raftConfig) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java Pdpb.UpdatePdRaftRequest request = Pdpb.UpdatePdRaftRequest.newBuilder() .setHeader(header) .setConfig(raftConfig) @@ -1341,4 +1910,225 @@ public void setLeader(String leader) { this.leader = leader; } } +======== + ClusterOp.UpdatePdRaftRequest request = ClusterOp.UpdatePdRaftRequest.newBuilder() + .setHeader(header) + .setConfig(raftConfig) + .build(); + // Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request); + ClusterOp.UpdatePdRaftResponse response = blockingUnaryCall(PDGrpc.getUpdatePdRaftMethod(), request); + handleResponseError(response.getHeader()); + } + + public long submitBuildIndexTask(Metapb.BuildIndexParam param) throws PDException { + Pdpb.IndexTaskCreateRequest request = Pdpb.IndexTaskCreateRequest.newBuilder() + .setHeader(header) + .setParam(param) + .build(); + // var response = getStub().submitTask(request); + var response = blockingUnaryCall(PDGrpc.getSubmitIndexTaskMethod(), request); 
+ handleResponseError(response.getHeader()); + return response.getTaskId(); + } + + public long submitBackupGraphTask(String sourceGraph, String targetGraph) throws PDException { + Pdpb.BackupGraphRequest request = Pdpb.BackupGraphRequest.newBuilder() + .setGraphName(sourceGraph) + .setTargetGraphName(targetGraph) + .build(); + // var response = getStub().submitTask(request); + var response = blockingUnaryCall(PDGrpc.getSubmitBackupGraphTaskMethod(), request); + handleResponseError(response.getHeader()); + return response.getTaskId(); + } + + public Pdpb.TaskQueryResponse queryBuildIndexTaskStatus(long taskId) throws PDException { + Pdpb.TaskQueryRequest request = Pdpb.TaskQueryRequest.newBuilder() + .setHeader(header) + .setTaskId(taskId) + .build(); + // var response = getStub().queryTaskState(request); + var response = blockingUnaryCall(PDGrpc.getQueryTaskStateMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Pdpb.TaskQueryResponse retryTask(long taskId) throws PDException { + Pdpb.TaskQueryRequest request = Pdpb.TaskQueryRequest.newBuilder() + .setHeader(header) + .setTaskId(taskId) + .build(); + // var response = getStub().retryIndexTask(request); + var response = blockingUnaryCall(PDGrpc.getRetryTaskMethod(), request); + handleResponseError(response.getHeader()); + return response; + } + + public Pdpb.GraphStatsResponse getGraphStats(String graphName) throws PDException { + Pdpb.GetGraphRequest request = + Pdpb.GetGraphRequest.newBuilder().setHeader(header).setGraphName(graphName).build(); + // Pdpb.GraphStatsResponse graphStats = getStub().getGraphStats(request); + Pdpb.GraphStatsResponse graphStats = blockingUnaryCall(PDGrpc.getGetGraphStatsMethod(), request); + handleResponseError(graphStats.getHeader()); + return graphStats; + } + + /** + * 返回startKey和endKey跨越的所有分区信息 + * + * @param graphName + * @param startKey + * @param endKey + * @return + * @throws PDException + */ + public List> scanPartitions(String 
graphName, byte[] startKey, + byte[] endKey) throws PDException { + List> partitions = new ArrayList<>(); + KVPair startPartShard = getPartition(graphName, startKey); + KVPair endPartShard = getPartition(graphName, endKey); + if (startPartShard == null || endPartShard == null) { + return null; + } + partitions.add(startPartShard); + while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey() + && startPartShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE /*排除最后一个分区*/) { + startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey()); + partitions.add(startPartShard); + } + return partitions; + } + + /** + * 查询Key所属分区信息 + * + * @param graphName + * @param key + * @return + * @throws PDException + */ + public KVPair getPartition(String graphName, byte[] key) throws PDException { + // 先查cache,cache没有命中,在调用PD + KVPair partShard = cache.getPartitionByKey(graphName, key); + partShard = getKvPair(graphName, key, partShard); + return partShard; + } + + public KVPair getPartition(String graphName, byte[] key, int code) throws PDException { + KVPair partShard = cache.getPartitionByCode(graphName, code); + partShard = getKvPair(graphName, key, partShard); + return partShard; + } + + /** + * Hugegraph-store调用,更新缓存 + * + * @param partition + */ + public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) { + if (config.isEnableCache()) { + cache.update(partition.getGraphName(), partition.getId(), partition); + cache.updateLeader(partition.getId(), leader); + } + } + + /** + * Hugegraph server 调用,Leader发生改变,更新缓存 + */ + public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) { + KVPair partShard = null; + try { + partShard = this.getPartitionById(graphName, partId); + + if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) { + var shardGroup = this.getShardGroup(partId); + Metapb.Shard shard = null; + List shards = new ArrayList<>(); + + for (Metapb.Shard s : 
shardGroup.getShardsList()) { + if (s.getStoreId() == leaderStoreId) { + shard = s; + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Leader).build()); + } else { + shards.add(Metapb.Shard.newBuilder(s) + .setStoreId(s.getStoreId()) + .setRole(Metapb.ShardRole.Follower).build()); + } + } + + if (config.isEnableCache()) { + if (shard == null) { + // 分区的shard中未找到leader,说明分区发生了迁移 + cache.removePartition(graphName, partId); + } else { + cache.updateLeader(partId, shard); + } + } + } + } catch (PDException e) { + log.error("getPartitionException: {}", e.getMessage()); + } + } + + public Metapb.StoreGroup createStoreGroup(int groupId, String name, int partitionCount) throws PDException { + StoreGroup.CreateStoreGroupRequest request = StoreGroup.CreateStoreGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .setName(name) + .setPartitionCount(partitionCount) + .build(); + + StoreGroup.CreateStoreGroupResponse response = blockingUnaryCall(PDGrpc.getCreateStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroup(); + } + + public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException { + StoreGroup.GetStoreGroupRequest request = StoreGroup.GetStoreGroupRequest.newBuilder() + .setHeader(header) + .setGroupId(groupId) + .build(); + StoreGroup.GetStoreGroupResponse response = blockingUnaryCall(PDGrpc.getGetStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroup(); + } + + public List getAllStoreGroups() throws PDException { + StoreGroup.GetAllStoreGroupRequest request = StoreGroup.GetAllStoreGroupRequest.newBuilder() + .setHeader(header).build(); + StoreGroup.GetAllStoreGroupResponse response = blockingUnaryCall(PDGrpc.getGetAllStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroupsList(); + } + + public Metapb.StoreGroup updateStoreGroup(int groupId, 
String name) throws PDException { + StoreGroup.UpdateStoreGroupRequest request = StoreGroup.UpdateStoreGroupRequest.newBuilder().setHeader(header) + .setGroupId(groupId) + .setName(name) + .build(); + StoreGroup.UpdateStoreGroupResponse response = blockingUnaryCall(PDGrpc.getUpdateStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoreGroup(); + } + + public List getStoresByStoreGroup(int groupId) throws PDException { + StoreGroup.GetGroupStoresRequest request = StoreGroup.GetGroupStoresRequest.newBuilder() + .setHeader(header).setStoreGroupId(groupId).build(); + StoreGroup.GetGroupStoresResponse response = blockingUnaryCall(PDGrpc.getGetStoresByStoreGroupMethod(), request); + handleResponseError(response.getHeader()); + return response.getStoresList(); + } + + public boolean updateStoreGroupRelation(long storeId, int groupId) throws PDException { + StoreGroup.UpdateStoreGroupRelationRequest request = StoreGroup.UpdateStoreGroupRelationRequest.newBuilder() + .setHeader(header).setStoreId(storeId) + .setStoreGroupId(groupId).build(); + var response = blockingUnaryCall(PDGrpc.getUpdateStoreGroupRelationMethod(), request); + handleResponseError(response.getHeader()); + return response.getSuccess(); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/impl/PDApi.java } diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java deleted file mode 100644 index c6c46d03d1..0000000000 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hugegraph.pd.client; - -import java.io.Closeable; -import java.util.function.Consumer; - -import org.apache.hugegraph.pd.grpc.watch.WatchResponse; -import org.apache.hugegraph.pd.watch.NodeEvent; -import org.apache.hugegraph.pd.watch.PartitionEvent; - -public interface PDWatch { - - /** - * Watch the events of all store-nodes registered in the remote PD-Server. - * - * @param listener - * @return - */ - //PDWatcher watchNode(Listener listener); - - /*** inner static methods ***/ - static Listener listener(Consumer onNext) { - return listener(onNext, t -> { - }, () -> { - }); - } - - static Listener listener(Consumer onNext, Consumer onError) { - return listener(onNext, onError, () -> { - }); - } - - static Listener listener(Consumer onNext, Runnable onCompleted) { - return listener(onNext, t -> { - }, onCompleted); - } - - static Listener listener(Consumer onNext, Consumer onError, - Runnable onCompleted) { - return new Listener() { - @Override - public void onNext(T response) { - onNext.accept(response); - } - - @Override - public void onError(Throwable throwable) { - onError.accept(throwable); - } - - @Override - public void onCompleted() { - onCompleted.run(); - } - }; - } - - /** - * Watch the events of the store-nodes assigned to a specified graph. 
- * - * @param graph the graph name which you want to watch - * @param listener - * @return - */ - //PDWatcher watchNode(String graph, Listener listener); - - String getCurrentHost(); - - boolean checkChannel(); - - /** - * @param listener - * @return - */ - Watcher watchPartition(Listener listener); - - Watcher watchNode(Listener listener); - - Watcher watchGraph(Listener listener); - - Watcher watchShardGroup(Listener listener); - - /** - * Interface of Watcher. - */ - interface Listener { - - /** - * Invoked on new events. - * - * @param response the response. - */ - void onNext(T response); - - /** - * Invoked on errors. - * - * @param throwable the error. - */ - void onError(Throwable throwable); - - /** - * Invoked on completion. - */ - default void onCompleted() { - } - - } - - interface Watcher extends Closeable { - - /** - * closes this watcher and all its resources. - */ - @Override - void close(); - - /** - * Requests the latest revision processed and propagates it to listeners - */ - // TODO: what's it for? - //void requestProgress(); - } -} diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java deleted file mode 100644 index 9b136bb26a..0000000000 --- a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hugegraph.pd.client; - -import java.util.function.Supplier; - -import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc; -import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest; -import org.apache.hugegraph.pd.grpc.watch.WatchNodeResponse; -import org.apache.hugegraph.pd.grpc.watch.WatchPartitionResponse; -import org.apache.hugegraph.pd.grpc.watch.WatchRequest; -import org.apache.hugegraph.pd.grpc.watch.WatchResponse; -import org.apache.hugegraph.pd.grpc.watch.WatchType; -import org.apache.hugegraph.pd.watch.NodeEvent; -import org.apache.hugegraph.pd.watch.PartitionEvent; - -import io.grpc.ManagedChannel; -import io.grpc.stub.StreamObserver; - -final class PDWatchImpl implements PDWatch { - - private final HgPdWatchGrpc.HgPdWatchStub stub; - - private final String pdServerAddress; - - // TODO: support several servers. - PDWatchImpl(String pdServerAddress) { - this.pdServerAddress = pdServerAddress; - this.stub = HgPdWatchGrpc.newStub(Channels.getChannel(pdServerAddress)); - } - - @Override - public String getCurrentHost() { - return this.pdServerAddress; - } - - @Override - public boolean checkChannel() { - return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown(); - } - - /** - * Get Partition change watcher. - * - * @param listener - * @return - */ - @Override - public Watcher watchPartition(Listener listener) { - return new PartitionWatcher(listener); - } - - /** - * Get Store-Node change watcher. 
- * - * @param listener - * @return - */ - @Override - public Watcher watchNode(Listener listener) { - return new NodeWatcher(listener); - } - - @Override - public Watcher watchGraph(Listener listener) { - return new GraphWatcher(listener); - } - - @Override - public Watcher watchShardGroup(Listener listener) { - return new ShardGroupWatcher(listener); - } - - private class GraphWatcher extends AbstractWatcher { - - private GraphWatcher(Listener listener) { - super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE) - .build() - ); - } - - @Override - public void onNext(WatchResponse watchResponse) { - this.listener.onNext(watchResponse); - } - } - - private class ShardGroupWatcher extends AbstractWatcher { - - private ShardGroupWatcher(Listener listener) { - super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE) - .build() - ); - } - - @Override - public void onNext(WatchResponse watchResponse) { - this.listener.onNext(watchResponse); - } - } - - private class PartitionWatcher extends AbstractWatcher { - - private PartitionWatcher(Listener listener) { - super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE) - .build() - ); - } - - @Override - public void onNext(WatchResponse watchResponse) { - WatchPartitionResponse res = watchResponse.getPartitionResponse(); - PartitionEvent event = new PartitionEvent(res.getGraph(), res.getPartitionId(), - PartitionEvent.ChangeType.grpcTypeOf( - res.getChangeType())); - this.listener.onNext(event); - } - } - - private class NodeWatcher extends AbstractWatcher { - - private NodeWatcher(Listener listener) { - super(listener, - () -> WatchCreateRequest - .newBuilder() - .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE) - .build() - ); - } - - @Override - public void onNext(WatchResponse watchResponse) { - WatchNodeResponse res = 
watchResponse.getNodeResponse(); - NodeEvent event = new NodeEvent(res.getGraph(), res.getNodeId(), - NodeEvent.EventType.grpcTypeOf(res.getNodeEventType())); - this.listener.onNext(event); - } - } - - private abstract class AbstractWatcher implements Watcher, StreamObserver { - - Listener listener; - StreamObserver reqStream; - Supplier requestSupplier; - - private AbstractWatcher(Listener listener, - Supplier requestSupplier) { - this.listener = listener; - this.requestSupplier = requestSupplier; - this.init(); - } - - void init() { - this.reqStream = PDWatchImpl.this.stub.watch(this); - this.reqStream.onNext(WatchRequest.newBuilder().setCreateRequest( - this.requestSupplier.get() - ).build()); - } - - @Override - public void close() { - this.reqStream.onCompleted(); - } - - @Override - public abstract void onNext(WatchResponse watchResponse); - - @Override - public void onError(Throwable throwable) { - - this.listener.onError(throwable); - } - - @Override - public void onCompleted() { - this.listener.onCompleted(); - } - } - -} diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java index 710f96f28c..ab36d709bc 100644 --- a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java +++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. 
*/ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java package org.apache.hugegraph.pd.common; import java.util.Collection; @@ -32,20 +35,30 @@ public static void isTrue(boolean expression, String message) { } } + public static void assertTrue(boolean expression, String... message) { + if (!expression) { + throw new IllegalArgumentException(String.join("", message)); + } + } + public static void isFalse(boolean expression, String message) { isTrue(!expression, message); } + public static void assertFalse(boolean expression, String... message) { + assertTrue(!expression, message); + } + public static void isArgumentValid(byte[] bytes, String parameter) { - isFalse(isInvalid(bytes), "The argument is invalid: " + parameter); + assertFalse(isInvalid(bytes), "The argument is invalid: ", parameter); } public static void isArgumentValid(String str, String parameter) { - isFalse(isInvalid(str), "The argument is invalid: " + parameter); + assertFalse(isInvalid(str), "The argument is invalid: ", parameter); } public static void isArgumentNotNull(Object obj, String parameter) { - isTrue(obj != null, "The argument is null: " + parameter); + assertTrue(obj != null, "The argument is null: ", parameter); } public static void istValid(byte[] bytes, String msg) { diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java index b5e916c481..771064086b 100644 --- a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java +++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. */ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java package org.apache.hugegraph.pd.common; import java.io.Serializable; diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java index 31cc29deed..d27054c032 100644 --- a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java +++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -16,9 +17,16 @@ */ package org.apache.hugegraph.pd.common; +======== +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.grpc.Metapb; +import com.google.common.collect.Range; +import com.google.common.collect.RangeMap; +import com.google.common.collect.TreeRangeMap; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -28,6 +36,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java import org.apache.hugegraph.pd.grpc.Metapb; import com.google.common.collect.Range; @@ -43,6 +52,26 @@ public class PartitionCache { private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); private final Map locks = new HashMap<>(); +======== 
+/** + * 放弃copy on write的方式 + * 1. 在 graph * partition 数量极多的时候,效率严重下降,不能用 + */ +public class PartitionCache { + + // 每张图一个缓存 + private volatile Map> keyToPartIdCache; + // graphName + PartitionID组成key + private volatile Map> partitionCache; + + private volatile Map shardGroupCache; + + private volatile Map storeCache; + + private volatile Map graphCache; + // 读写锁对象 + private ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java Lock writeLock = readWriteLock.writeLock(); // One cache per graph private volatile Map> keyToPartIdCache; @@ -52,9 +81,11 @@ public class PartitionCache { private volatile Map storeCache; private volatile Map graphCache; + private volatile Map locks = new ConcurrentHashMap<>(); + public PartitionCache() { - keyToPartIdCache = new HashMap<>(); - partitionCache = new HashMap<>(); + keyToPartIdCache = new ConcurrentHashMap<>(); + partitionCache = new ConcurrentHashMap<>(); shardGroupCache = new ConcurrentHashMap<>(); storeCache = new ConcurrentHashMap<>(); graphCache = new ConcurrentHashMap<>(); @@ -69,7 +100,11 @@ private AtomicBoolean getOrCreateGraphLock(String graphName) { lock = new AtomicBoolean(); locks.put(graphName, lock); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java } finally { +======== + }finally { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java writeLock.unlock(); } } @@ -85,7 +120,11 @@ public void waitGraphLock(String graphName) { public void lockGraph(String graphName) { var lock = getOrCreateGraphLock(graphName); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java while (!lock.compareAndSet(false, true)) { +======== + while (! 
lock.compareAndSet(false, true)) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java Thread.onSpinWait(); } } @@ -106,7 +145,11 @@ public KVPair getPartitionById(String graphName, waitGraphLock(graphName); var graphs = partitionCache.get(graphName); if (graphs != null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java var partition = graphs.get(partId); +======== + var partition = graphs.get(partId ); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java if (partition != null) { return new KVPair<>(partition, getLeaderShard(partId)); } @@ -149,11 +192,19 @@ public List getPartitions(String graphName) { waitGraphLock(graphName); List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java if (!partitionCache.containsKey(graphName)) { return partitions; } partitionCache.get(graphName).forEach((k, v) -> { partitions.add(v); +======== + if (! partitionCache.containsKey(graphName)) { + return partitions; + } + partitionCache.get(graphName).forEach((k,v) -> { + partitions.add(v); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java }); return partitions; @@ -173,6 +224,7 @@ public boolean addPartition(String graphName, int partId, Metapb.Partition parti try { lockGraph(graphName); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); @@ -181,6 +233,17 @@ public boolean addPartition(String graphName, int partId, Metapb.Partition parti // should not be deleted // When you confirm that the old start and end are your own, you can delete the // old ones. (i.e. not covered yet) +======== + if (! 
partitionCache.containsKey(graphName)) { + partitionCache.put(graphName, new ConcurrentHashMap<>()); + } + + partitionCache.get(graphName).put(partId, partition); + + if (old != null) { + // old [1-3) 被 [2-3)覆盖了。当 [1-3) 变成[1-2) 不应该删除原先的[1-3) + // 当确认老的 start, end 都是自己的时候,才可以删除老的. (即还没覆盖) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java var graphRange = keyToPartIdCache.get(graphName); if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) && Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) { @@ -188,9 +251,18 @@ public boolean addPartition(String graphName, int partId, Metapb.Partition parti } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); +======== + if (! keyToPartIdCache.containsKey(graphName)) { + keyToPartIdCache.put(graphName, TreeRangeMap.create()); + } + keyToPartIdCache.get(graphName).put(Range.closedOpen(partition.getStartKey(), + partition.getEndKey()), partId); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java } finally { unlockGraph(graphName); } @@ -214,10 +286,16 @@ public void updatePartition(String graphName, int partId, Metapb.Partition parti } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition); keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create()) .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); +======== + partitionCache.computeIfAbsent(graphName, k -> new ConcurrentHashMap<>()).put(partId, partition); + keyToPartIdCache.computeIfAbsent(graphName, k -> 
TreeRangeMap.create()) + .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java } finally { unlockGraph(graphName); } @@ -270,8 +348,13 @@ public void remove(String graphName, int id) { public void removePartitions() { writeLock.lock(); try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java partitionCache = new HashMap<>(); keyToPartIdCache = new HashMap<>(); +======== + partitionCache = new ConcurrentHashMap<>(); + keyToPartIdCache = new ConcurrentHashMap<>(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java locks.clear(); } finally { writeLock.unlock(); @@ -315,6 +398,10 @@ public Metapb.ShardGroup getShardGroup(int groupId) { return shardGroupCache.get(groupId); } + public Map getShardGroups() { + return this.shardGroupCache; + } + public boolean addStore(Long storeId, Metapb.Store store) { Metapb.Store oldStore = storeCache.get(storeId); if (oldStore != null && oldStore.equals(store)) { @@ -333,7 +420,7 @@ public void removeStore(Long storeId) { } public boolean hasGraph(String graphName) { - return getPartitions(graphName).size() > 0; + return !getPartitions(graphName).isEmpty() || getGraph(graphName) != null; } public void updateGraph(Metapb.Graph graph) { @@ -358,8 +445,8 @@ public List getGraphs() { public void reset() { writeLock.lock(); try { - partitionCache = new HashMap<>(); - keyToPartIdCache = new HashMap<>(); + partitionCache = new ConcurrentHashMap<>(); + keyToPartIdCache = new ConcurrentHashMap<>(); shardGroupCache = new ConcurrentHashMap<>(); storeCache = new ConcurrentHashMap<>(); graphCache = new ConcurrentHashMap<>(); diff --git a/hugegraph-pd/hg-pd-core/pom.xml b/hugegraph-pd/hg-pd-core/pom.xml index e17570d592..546d0cee25 100644 --- a/hugegraph-pd/hg-pd-core/pom.xml +++ 
b/hugegraph-pd/hg-pd-core/pom.xml @@ -44,6 +44,10 @@ org.rocksdb rocksdbjni + + com.google.protobuf + protobuf-java + @@ -69,14 +73,15 @@ spring-boot 2.5.14 - - org.projectlombok - lombok - + + + + + org.apache.commons commons-lang3 - 3.12.0 + ${commons-lang3.version} com.google.code.gson diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java index c693a67b49..0e57cd241d 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. */ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java package org.apache.hugegraph.pd; import java.nio.charset.Charset; @@ -25,13 +28,21 @@ import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.config.PDConfig; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java import org.apache.hugegraph.pd.grpc.kv.Kv; import org.apache.hugegraph.pd.grpc.kv.V; +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java import org.apache.hugegraph.pd.meta.MetadataKeyHelper; import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; import org.apache.hugegraph.pd.store.KV; import org.springframework.stereotype.Service; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +======== +import org.apache.hugegraph.pd.grpc.kv.Kv; +import org.apache.hugegraph.pd.grpc.kv.V; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java import com.google.protobuf.InvalidProtocolBufferException; import lombok.extern.slf4j.Slf4j; @@ -223,16 +234,30 @@ public Map scanWithPrefix(String key) throws PDException { public boolean locked(String key) throws PDException { String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java Map allLock = scanWithPrefix(lockKey); return allLock != null && allLock.size() != 0; +======== + Map allLock = scanWithPrefix(lockKey + KV_DELIMITER); + if (allLock == null || allLock.size() == 0) { + return false; + } else { + return true; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java } private boolean owned(String key, long clientId) throws PDException { String lockKey = KvService.getKeyWithoutPrefix(KvService.LOCK_PREFIX, key); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java Map allLock = scanWithPrefix(lockKey); if (allLock.size() == 0) { return true; } +======== + Map allLock = scanWithPrefix(lockKey + KV_DELIMITER); + if (allLock.size() == 0) return true; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java for (Map.Entry entry : allLock.entrySet()) { String entryKey = entry.getKey(); String[] split = entryKey.split(String.valueOf(KV_DELIMITER)); @@ -312,4 +337,28 @@ public void clearTTLData() { public MetadataRocksDBStore getMeta() { return meta; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java +======== + + public static byte[] getKeyBytes(Object... keys) { + String key = getKey(keys); + return key.getBytes(Charset.defaultCharset()); + } + + public static String getKeyWithoutPrefix(Object... 
keys) { + StringBuilder builder = MetadataKeyHelper.getStringBuilderHelper(); + for (Object key : keys) { + builder.append(key == null ? "" : key).append(KV_DELIMITER); + } + return builder.substring(0, builder.length() - 1); + } + + public static String getDelimiter() { + return String.valueOf(KV_DELIMITER); + } + + public MetadataRocksDBStore getMeta() { + return meta; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/KvService.java } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java index 35959849bc..c752a6c572 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,19 @@ */ package org.apache.hugegraph.pd; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.LogMeta; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import com.google.protobuf.Any; +import com.google.protobuf.GeneratedMessageV3; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/LogService.java import java.util.List; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java index 9f4dda31f5..0e64e0bc73 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,37 @@ */ package org.apache.hugegraph.pd; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.Consts; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PartitionUtils; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.CleanType; +import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.listener.PartitionInstructionListener; +import org.apache.hugegraph.pd.listener.PartitionStatusListener; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.PartitionMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import org.apache.hugegraph.pd.raft.RaftStateListener; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.collections4.SetUtils; +import org.apache.commons.lang3.StringUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java import java.util.ArrayList; import java.util.Collections; @@ -27,6 +59,7 @@ import java.util.Optional; import java.util.stream.Collectors; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java import org.apache.commons.lang3.StringUtils; 
import org.apache.hugegraph.pd.common.KVPair; import org.apache.hugegraph.pd.common.PDException; @@ -50,6 +83,9 @@ import org.apache.hugegraph.pd.raft.RaftStateListener; import lombok.extern.slf4j.Slf4j; +======== + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java /** * Partition management @@ -57,23 +93,45 @@ @Slf4j public class PartitionService implements RaftStateListener { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java private final long Partition_Version_Skip = 0x0F; +======== + // private final long Partition_Version_Skip = 0x0F; + private final PartitionMeta partitionMeta; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java private final StoreNodeService storeService; private final PartitionMeta partitionMeta; private final PDConfig pdConfig; // Partition command listening private final List instructionListeners; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // Partition status listeners +======== + private final PDConfig pdConfig; + + private final ConfigService configService; + + // 分区命令监听 + private final List instructionListeners; + + // 分区状态监听 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java private final List statusListeners; - public PartitionService(PDConfig config, StoreNodeService storeService) { + public PartitionService(PDConfig config, StoreNodeService storeService, ConfigService configService) { this.pdConfig = config; this.storeService = storeService; partitionMeta = MetadataFactory.newPartitionMeta(config); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java instructionListeners = Collections.synchronizedList(new ArrayList()); statusListeners = Collections.synchronizedList(new ArrayList()); +======== + 
instructionListeners = Collections.synchronizedList(new ArrayList<>()); + statusListeners = Collections.synchronizedList(new ArrayList<>()); + this.configService = configService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } public void init() throws PDException { @@ -123,23 +181,30 @@ public Metapb.PartitionShard getPartitionShard(String graphName, byte[] key) thr * @param code * @return */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public Metapb.PartitionShard getPartitionByCode(String graphName, long code) throws PDException { if (code < 0 || code >= PartitionUtils.MAX_VALUE) { throw new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "code error"); +======== + public Metapb.PartitionShard getPartitionByCode(String graphName, long code) throws PDException { + if ( code < 0 || code >= PartitionUtils.MAX_VALUE) { + throw new PDException(ErrorType.NOT_FOUND_VALUE, "code error"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } // Find the partition ID based on the code, and if it doesn't find, create a new partition Metapb.Partition partition = partitionMeta.getPartitionByCode(graphName, code); if (partition == null) { synchronized (this) { - if (partition == null) { + if ((partition = partitionMeta.getPartitionByCode(graphName, code)) == null) { partition = newPartition(graphName, code); } } } Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java .setPartition(partition) .setLeader(storeService.getLeader( partition, 0)) @@ -148,6 +213,14 @@ public Metapb.PartitionShard getPartitionByCode(String graphName, long code) thr "{} Partition get code = {}, partition id = {}, start = {}, end = {}, leader = {}", graphName, (code), partition.getId(), partition.getStartKey(), 
partition.getEndKey(), partShard.getLeader()); +======== + .setPartition(partition) + .setLeader(storeService.getLeader(partition, 0)) + .build(); + log.debug("{} Partition get code = {}, partition id = {}, start = {}, end = {}, leader = {}", + graphName, (code), partition.getId(), partition.getStartKey(), partition.getEndKey(), + partShard.getLeader()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java return partShard; } @@ -163,9 +236,18 @@ public Metapb.PartitionShard getPartitionByCode(String graphName, long code) thr public Metapb.PartitionShard getPartitionShardById(String graphName, int partId) throws PDException { Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); - if (partition == null) { - return null; + if (partition != null) { + var shard = storeService.getLeader(partition, 0); + if (shard != null) { + return Metapb.PartitionShard.newBuilder() + .setPartition(partition) + // 此处需要返回正确的leader,暂时默认取第一个 + .setLeader(shard) + .build(); + } + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java Metapb.PartitionShard partShard = Metapb.PartitionShard.newBuilder() .setPartition(partition) @@ -174,6 +256,9 @@ public Metapb.PartitionShard getPartitionShardById(String graphName, int partId) .build(); return partShard; +======== + return null; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } public Metapb.Partition getPartitionById(String graphName, int partId) throws PDException { @@ -205,7 +290,9 @@ public List getPartitions(String graphName) { * @return */ public List getPartitionByStore(Metapb.Store store) throws PDException { + List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java getGraphs().forEach(graph -> { getPartitions(graph.getGraphName()).forEach(partition -> { try { @@ 
-220,6 +307,12 @@ public List getPartitionByStore(Metapb.Store store) throws PDE }); }); return partitions; +======== + for (Metapb.ShardGroup group : storeService.getShardGroupsByStore(store.getId())) { + partitions.addAll(getPartitionById(group.getId())); + } + return partitions; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } /** @@ -229,24 +322,51 @@ public List getPartitionByStore(Metapb.Store store) throws PDE * @return */ private Metapb.Partition newPartition(String graphName, long code) throws PDException { - Metapb.Graph graph = partitionMeta.getAndCreateGraph(graphName); + Metapb.Graph graph = partitionMeta.getGraph(graphName); + if (graph == null) { + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "graph not exists:" + graphName); + } + int partitionSize = PartitionUtils.MAX_VALUE / graph.getPartitionCount(); if (PartitionUtils.MAX_VALUE % graph.getPartitionCount() != 0) { // There is a remainder, and the partition is inexhaustible partitionSize++; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java int partitionId = (int) (code / partitionSize); long startKey = (long) partitionSize * partitionId; long endKey = (long) partitionSize * (partitionId + 1); // Check Local Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partitionId); +======== + int offset = (int) (code / partitionSize); + return newPartition(graph, offset); + } + + private Metapb.Partition newPartition(Metapb.Graph graph, int offset) throws PDException { + int partitionId = getPartitionId(graph, offset); + + int partitionSize = PartitionUtils.MAX_VALUE / graph.getPartitionCount(); + if (PartitionUtils.MAX_VALUE % graph.getPartitionCount() != 0) { + // 有余数,分区除不尽 + partitionSize++; + } + + // 检查本地 + Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partitionId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (partition == null) { - storeService.allocShards(null, partitionId); + log.info("alloc partition for graph: {}, partition id: {}" , graph.getGraphName(), partitionId); + storeService.allocShards(graph, partitionId); + + long startKey = (long) partitionSize * offset; + long endKey = (long) partitionSize * (offset + 1); // Assign a store partition = Metapb.Partition.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java .setId(partitionId) .setVersion(0) .setState(Metapb.PartitionState.PState_Normal) @@ -254,6 +374,15 @@ private Metapb.Partition newPartition(String graphName, long code) throws PDExce .setEndKey(endKey) .setGraphName(graphName) .build(); +======== + .setId(partitionId) + .setVersion(0) + .setState(Metapb.PartitionState.PState_Normal) + .setStartKey(startKey) + .setEndKey(endKey) + .setGraphName(graph.getGraphName()) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.info("Create newPartition {}", partition); } @@ -263,6 +392,24 @@ private Metapb.Partition newPartition(String graphName, long code) throws PDExce return partition; } + public void allocGraphPartitions(Metapb.Graph graph) throws PDException { + for (int i = 0; i < graph.getPartitionCount(); i++) { + newPartition(graph, i); + } + } + + /** + * 计算graph的分区id。 partition gap * store group id + offset + * + * @param graph graph + * @param offset 偏移量,从0开始 + * @return new partition id + * @throws PDException + */ + private int getPartitionId(Metapb.Graph graph, int offset) { + return graph.getStoreGroupId() * Consts.PARTITION_GAP + offset; + } + /** * compute graph partition id, partition gap * store group id + offset * @@ -324,8 +471,13 @@ public synchronized long updatePartition(List partitions) thro * @param state * @throws PDException */ +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public synchronized void updatePartitionState(String graph, int partId, Metapb.PartitionState state) throws PDException { +======== + public synchronized void updatePartitionState(String graph, int partId, Metapb.PartitionState state) + throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java Metapb.Partition partition = getPartitionById(graph, partId); if (partition.getState() != state) { @@ -340,9 +492,14 @@ public synchronized void updatePartitionState(String graph, int partId, public synchronized void updateGraphState(String graphName, Metapb.PartitionState state) throws PDException { Metapb.Graph graph = getGraph(graphName); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (graph != null) { partitionMeta.updateGraph(graph.toBuilder() .setState(state).build()); +======== + if (graph != null && graph.getState() != state) { + partitionMeta.updateGraph(graph.toBuilder().setState(state).build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } } @@ -350,9 +507,10 @@ public synchronized long removePartition(String graphName, int partId) throws PD log.info("Partition {}-{} removePartition", graphName, partId); Metapb.Partition partition = partitionMeta.getPartitionById(graphName, partId); var ret = partitionMeta.removePartition(graphName, partId); - partitionMeta.reload(); + partitionMeta.loadGraph(graphName); onPartitionRemoved(partition); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java try { Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { @@ -373,13 +531,43 @@ public synchronized long removePartition(String graphName, int partId) throws 
PD } catch (PDException e) { log.error("onPartitionChanged", e); } - +======== + // source中有些是 offline的,删除后,需要更新图的状态 +// try { + // partition状态与 partition无关 +// Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; +// for (Metapb.Partition pt : partitionMeta.getPartitions(partition.getGraphName())) { +// if (pt.getState().getNumber() > state.getNumber()) { +// state = pt.getState(); +// } +// } +// updateGraphState(partition.getGraphName(), state); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + + // 理论上不需要处理, shard group更新状态的时候,已经更新了cluster的状态 +// Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; +// for(Metapb.ShardGroup group : storeService.getShardGroups()){ +// if ( group.getState().getNumber() > state.getNumber()) { +// state = group.getState(); +// } +// } +// +// storeService.updateClusterStatus(getStoreGroupByGraph(graphName), state); +// }catch ( PDException e){ +// log.error("onPartitionChanged", e); +// } +// return ret; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { return partitionMeta.getPartitionStats(graphName, partitionId); +======== + public Metapb.PartitionStats getPartitionStats(String graphName, int partitionId) throws PDException { + return partitionMeta.getPartitionStats("", partitionId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } /** @@ -397,6 +585,12 @@ public List getGraphs() throws PDException { return partitionMeta.getGraphs(); } + public List getGraphs(int shardGroup) throws PDException { + return partitionMeta.getGraphs().stream() + .filter(graph -> graph.getStoreGroupId() == shardGroup / Consts.PARTITION_GAP) + .collect(Collectors.toList()); + } + public Metapb.Graph getGraph(String graphName) throws PDException { 
return partitionMeta.getGraph(graphName); } @@ -407,11 +601,35 @@ public Metapb.Graph getGraph(String graphName) throws PDException { public Metapb.Graph delGraph(String graphName) throws PDException { log.info("delGraph {}", graphName); Metapb.Graph graph = getGraph(graphName); - getPartitions(graphName).forEach(partition -> { - onPartitionRemoved(partition); - }); + getPartitions(graphName).forEach(this::onPartitionRemoved); partitionMeta.removeAllPartitions(graphName); partitionMeta.removeGraph(graphName); + if (!StringUtils.isEmpty(graphName)) { + partitionMeta.removePartitionStats(graphName); + } + return graph; + } + + public synchronized Metapb.Graph createGraph(String graphName, int partitionCount, int storeGroupId) + throws PDException { + var lastGraph = partitionMeta.getGraph(graphName); + if (lastGraph != null) { + throw new PDException(ErrorType.GRAPH_ALREADY_EXISTS, "graph already exists:" + graphName); + } + + if (partitionCount == 0) { + partitionCount = configService.getPartitionCount(storeGroupId); + } + + var graph = partitionMeta.createGraph(graphName, partitionCount, storeGroupId); + try { + // alloc partition + allocGraphPartitions(graph); + } catch (PDException e) { + // when errors occur, remove graph + partitionMeta.removeGraph(graphName); + throw e; + } return graph; } @@ -419,12 +637,21 @@ public Metapb.Graph delGraph(String graphName) throws PDException { * To modify the graph information, you need to notify the store */ public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { - Metapb.Graph lastGraph = partitionMeta.getAndCreateGraph(graph.getGraphName()); + Metapb.Graph lastGraph = partitionMeta.getGraph(graph.getGraphName()); + if (lastGraph == null) { + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "graph not exists:" + graph.getGraphName()); + } + log.info("updateGraph graph: {}, last: {}", graph, lastGraph); +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java int partCount = (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? 1 : pdConfig.getPartition().getTotalCount(); +======== + int partCount = (graph.getGraphName().endsWith("/s") || graph.getGraphName().endsWith("/m")) ? + 1 : configService.getPartitionCount(graph.getStoreGroupId()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // set the partition count to specified if legal. if (graph.getPartitionCount() <= partCount && graph.getPartitionCount() > 0) { @@ -432,7 +659,11 @@ public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDExcept } if (partCount == 0) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java throw new PDException(10010, "update graph error, partition count = 0"); +======== + throw new PDException(ErrorType.Invalid_Partition_count, "partition count = 0"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } graph = lastGraph.toBuilder() @@ -444,9 +675,28 @@ public synchronized Metapb.Graph updateGraph(Metapb.Graph graph) throws PDExcept // The number of partitions has changed if (lastGraph.getPartitionCount() != graph.getPartitionCount()) { log.info("updateGraph graph: {}, partition count changed from {} to {}", +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java graph.getGraphName(), lastGraph.getPartitionCount(), graph.getPartitionCount()); +======== + graph.getGraphName(), lastGraph.getPartitionCount(), graph.getPartitionCount()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + return graph; + } + + /** + * 修改图名称信息 + */ + public synchronized Metapb.Graph updateGraphName(Metapb.Graph graph) throws PDException { + Metapb.Graph 
lastGraph = partitionMeta.getGraph(graph.getGraphName()); + if (lastGraph == null) { + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "update graph: " + graph.getGraphName() + ", not exists"); } + graph = lastGraph.toBuilder() + .setGraphName(graph.getGraphName()) + .build(); + partitionMeta.updateGraph(graph); return graph; } @@ -496,8 +746,13 @@ public void storeOffline(Metapb.Store store) { */ public synchronized void shardOffline(Metapb.Partition partition, long storeId) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.info("shardOffline Partition {} - {} shardOffline store : {}", partition.getGraphName(), partition.getId(), storeId); +======== + log.info("shardOffline Partition {}-{} shardOffline store : {}", + partition.getGraphName(), partition.getId(), storeId); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // partition = getPartitionById(partition.getGraphName(), partition.getId()); // Metapb.Partition.Builder builder = Metapb.Partition.newBuilder(partition); // builder.clearShards(); @@ -508,12 +763,12 @@ public synchronized void shardOffline(Metapb.Partition partition, long storeId) // partition = builder.build(); Metapb.Graph graph = getGraph(partition.getGraphName()); reallocPartitionShards(graph, partition); - } catch (PDException e) { log.error("storeOffline exception: ", e); } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java private boolean isShardListEquals(List list1, List list2) { if (list1 == list2) { return true; @@ -532,8 +787,22 @@ private boolean isShardListEquals(List list1, List l } return true; } +======== + /** + * 判定两个shard列表成员(store id)是否相同 + * @param list1 shard list1 + * @param list2 shard list2 + * @return true if members are same, false otherwise + */ + private boolean isShardListMemberEquals(List list1, List list2){ + if (list1 == list2) { + return 
true; + }else if (list1 != null && list2 != null && list1.size() == list2.size()) { + var s1 = list1.stream().map(Metapb.Shard::getStoreId).collect(Collectors.toSet()); + var s2 = list2.stream().map(Metapb.Shard::getStoreId).collect(Collectors.toSet()); + return SetUtils.isEqualSet(s1, s2); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } - return false; } @@ -555,7 +824,7 @@ public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partitio List shards = storeService.reallocShards(shardGroup); - if (isShardListEquals(originalShards, shards)) { + if (isShardListMemberEquals(originalShards, shards)) { log.info("reallocPartitionShards:{} vs {}", shardGroup, shards); // partition = Metapb.Partition.newBuilder(partition) // .clearShards().addAllShards(shards) @@ -565,11 +834,18 @@ public void reallocPartitionShards(Metapb.Graph graph, Metapb.Partition partitio } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public synchronized void reallocPartitionShards(String graphName, int partitionId) throws PDException { reallocPartitionShards(partitionMeta.getGraph(graphName), partitionMeta.getPartitionById(graphName, partitionId)); } +======== +// public synchronized void reallocPartitionShards(String graphName, int partitionId) throws PDException { +// reallocPartitionShards(partitionMeta.getGraph(graphName), +// partitionMeta.getPartitionById(graphName, partitionId)); +// } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java /** * Migrate partition copies @@ -577,8 +853,18 @@ public synchronized void reallocPartitionShards(String graphName, int partitionI public synchronized void movePartitionsShard(Integer partitionId, long fromStore, long toStore) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.info("movePartitionsShard 
partitionId {} from store {} to store {}", partitionId, fromStore, toStore); +======== + if (storeService.getStoreGroupByStore(fromStore) != storeService.getStoreGroupByStore(toStore)) { + log.error("move partition shard: source store {} and dest store {} has different store group", + fromStore, toStore); + return; + } + + log.info("movePartitionsShard partitionId {} from store {} to store {}", partitionId, fromStore, toStore); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java for (Metapb.Graph graph : getGraphs()) { Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partitionId); @@ -618,7 +904,11 @@ public synchronized void splitPartition(List> splits) t var tasks = new HashMap>>(); for (var pair : splits) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java for (var partition : getPartitionById(pair.getKey())) { +======== + for (var partition : getPartitionById(pair.getKey())){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (!tasks.containsKey(partition.getGraphName())) { tasks.put(partition.getGraphName(), new ArrayList<>()); } @@ -638,6 +928,7 @@ public synchronized void splitPartition(List> splits) t * @param toCount target count * @throws PDException */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws PDException { var partitionCount = getPartitions(graph.getGraphName()).size(); @@ -654,6 +945,24 @@ public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws throw new PDException(Pdpb.ErrorType.Invalid_Split_Partition_Count_VALUE, "invalid split partition count, make sure to count is N time of" + " current partition count"); +======== + + public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws 
PDException{ + assert graph != null; + var partitionCount = graph.getPartitionCount(); + var maxShardsPerStore = pdConfig.getPartition().getMaxShardsPerStore(); + var shardCount = pdConfig.getPartition().getShardCount(); + + if ( shardCount * toCount > + storeService.getActiveStoresByStoreGroup(graph.getStoreGroupId()).size() * maxShardsPerStore){ + throw new PDException(ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "can't satisfy target shard group count, reached the upper limit of the cluster"); + } + + if (toCount % partitionCount != 0 || toCount <= partitionCount) { + throw new PDException(ErrorType.Invalid_Split_Partition_Count_VALUE, + "invalid split partition count, make sure to count is N time of current partition count"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } // Since it is an integer multiple,The enrichment factor is toCount / current count @@ -666,11 +975,21 @@ public synchronized void splitPartition(Metapb.Graph graph, int toCount) throws splitPartition(graph, list); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java private synchronized void splitPartition(Metapb.Graph graph, List> splits) +======== + /** + * split graph + * @param graph graph + * @param splits pairs of (partition id, count) + * @throws PDException + */ + private synchronized void splitPartition(Metapb.Graph graph, List> splits) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java throws PDException { var taskInfoMeta = storeService.getTaskInfoMeta(); - if (taskInfoMeta.scanSplitTask(graph.getGraphName()).size() > 0) { + if (!taskInfoMeta.scanSplitTask(graph.getGraphName()).isEmpty()) { return; } @@ -700,21 +1019,37 @@ private synchronized void splitPartition(Metapb.Graph graph, for (; idx < splitCount - 2; idx++) { newPartitions.add(partition.toBuilder() +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java .setStartKey(newPartitions.get(idx).getEndKey()) .setEndKey(newPartitions.get(idx).getEndKey() + splitLen) .setId(i) .setState(Metapb.PartitionState.PState_Offline) .build()); +======== + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(newPartitions.get(idx).getEndKey() + splitLen) + .setId(getPartitionId(graph, i)) + .setState(Metapb.PartitionState.PState_Offline) + .build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java i += 1; } newPartitions.add(partition.toBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java .setStartKey(newPartitions.get(idx).getEndKey()) .setEndKey(partition.getEndKey()) .setId(i) .setState(Metapb.PartitionState.PState_Offline) .build()); +======== + .setStartKey(newPartitions.get(idx).getEndKey()) + .setEndKey(partition.getEndKey()) + .setId(getPartitionId(graph, i)) + .setState(Metapb.PartitionState.PState_Offline) + .build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java i += 1; // try to save new partitions, and repair shard group @@ -748,10 +1083,15 @@ private synchronized void splitPartition(Metapb.Graph graph, .build(); fireSplitPartition(partition, splitPartition); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // Change the partition status to Offline, and resume the partition status to // Offline after the task is completed updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Offline); +======== + // 修改Partition状态为下线,任务完成后恢复为上线 + updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Offline); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // Record 
transactions var task = MetaTask.Task.newBuilder().setPartition(partition) @@ -770,9 +1110,14 @@ private synchronized void splitPartition(Metapb.Graph graph, public void transferLeader(Integer partId, Metapb.Shard shard) { try { var partitions = getPartitionById(partId); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (partitions.size() > 0) { fireTransferLeader(partitions.get(0), TransferLeader.newBuilder().setShard(shard).build()); +======== + if (!partitions.isEmpty()) { + fireTransferLeader(partitions.get(0), TransferLeader.newBuilder().setShard(shard).build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } // for (Metapb.Graph graph : getGraphs()) { // Metapb.Partition partition = this.getPartitionById(graph.getGraphName(), partId); @@ -787,18 +1132,33 @@ public void transferLeader(Integer partId, Metapb.Shard shard) { } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java * // todo : Check the corresponding store group and check the logic * Partition merging: Merges the number of partitions in the entire cluster into toCount +======== + * // todo : 检查对应的store group, 检查逻辑 + * 分区合并,将整个集群的分区数,合并到toCount个 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java * * @param toCount The number of partitions to be targeted * @throws PDException when query errors */ - public void combinePartition(int toCount) throws PDException { + public void combinePartition(int storeGroupId, int toCount) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java int shardsTotalCount = getShardGroupCount(); for (var graph : getGraphs()) { // All graphs larger than the toCount partition are scaled in if (graph.getPartitionCount() > toCount) { +======== + int shardsTotalCount = 
getShardGroupCount(storeGroupId); + for (var graph : getGraphs()){ + if (graph.getStoreGroupId() != storeGroupId){ + continue; + } + // 对所有大于toCount分区的图,都进行缩容 + if (graph.getPartitionCount() > toCount){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java combineGraphPartition(graph, toCount, shardsTotalCount); } } @@ -812,7 +1172,9 @@ public void combinePartition(int toCount) throws PDException { * @throws PDException when query errors */ public void combineGraphPartition(String graphName, int toCount) throws PDException { - combineGraphPartition(getGraph(graphName), toCount, getShardGroupCount()); + var graph = getGraph(graphName); + assert graph != null; + combineGraphPartition(graph, toCount, getShardGroupCount(graph.getStoreGroupId())); } /** @@ -825,15 +1187,21 @@ public void combineGraphPartition(String graphName, int toCount) throws PDExcept */ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, int shardCount) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (graph == null) { throw new PDException(1, "Graph not exists, try to use full graph name, like " + "/DEFAULT/GRAPH_NAME/g"); +======== + if (graph == null){ + throw new PDException(ErrorType.GRAPH_NOT_EXISTS); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } log.info("Combine graph {} partition, from {}, to {}, with shard count:{}", graph.getGraphName(), graph.getPartitionCount(), toCount, shardCount); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (!checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, graph.getGraphName()); @@ -846,6 +1214,16 @@ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, var taskInfoMeta = 
storeService.getTaskInfoMeta(); if (taskInfoMeta.scanMoveTask(graph.getGraphName()).size() > 0) { throw new PDException(3, "Graph Combine process exists"); +======== + if (! checkTargetCount(graph.getPartitionCount(), toCount, shardCount)) { + log.error("Combine partition, illegal toCount:{}, graph:{}", toCount, graph.getGraphName()); + throw new PDException(ErrorType.Invalid_Combine_Partition_Count); + } + + var taskInfoMeta = storeService.getTaskInfoMeta(); + if (!taskInfoMeta.scanMoveTask(graph.getGraphName()).isEmpty()) { + throw new PDException(ErrorType.Combine_Partition_Doing); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } // According to key start sort @@ -901,20 +1279,33 @@ private synchronized void combineGraphPartition(Metapb.Graph graph, int toCount, Metapb.PartitionState.PState_Offline); } - storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); + storeService.updateClusterStatus(getStoreGroupByGraph(graph.getGraphName()), + Metapb.ClusterState.Cluster_Offline); } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java * get raft group count from storeService * +======== + * // todo: 调用?? 
+ * 通过 storeService 获取 raft group 总数 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java * @return the count of raft groups */ - private int getShardGroupCount() { + private int getShardGroupCount(int storeGroupId) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java return Optional.ofNullable(storeService.getShardGroups()).orElseGet(ArrayList::new) .size(); } catch (PDException e) { log.error("get shard group failed, error: {}", e); +======== + // todo: 检查调用逻辑 + return Optional.ofNullable(storeService.getShardGroups(storeGroupId)).orElseGet(ArrayList::new).size(); + }catch (PDException e){ + log.error("get shard group failed, error: ", e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } return 0; } @@ -941,6 +1332,7 @@ private boolean checkTargetCount(int fromCount, int toCount, int shardCount) { public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { Metapb.ShardGroup shardGroup = storeService.getShardGroup(stats.getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // shard group version changes or leader changes // (The shard group is controlled by the PD, and there may be brief inconsistencies after // operations such as splitting, subject to PD) @@ -951,16 +1343,49 @@ public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { storeService.updateShardGroup(stats.getId(), stats.getShardList(), stats.getLeaderTerm(), stats.getConfVer()); +======== + + // shard group version changes or leader changes + // (shard group 由pd控制, 在分裂等操作后,可能出现短暂不一致的情况,以pd为准) + // store 上传最终的 raft group 数据 + if (shardGroup != null) { + if (shardGroup.getVersion() < stats.getLeaderTerm() || shardGroup.getConfVer() < stats.getConfVer() || + ! 
isShardEquals(shardGroup.getShardsList(), stats.getShardList())) { + storeService.updateShardGroup(stats.getId(), + stats.getShardList(), stats.getLeaderTerm(), stats.getConfVer()); + } + // 更新state信息 + checkShardState(shardGroup, stats); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } - List partitions = getPartitionById(stats.getId()); - for (Metapb.Partition partition : partitions) { + // List partitions = getPartitionById(stats.getId()); + // for (Metapb.Partition partition : partitions) { // partitionMeta.getAndCreateGraph(partition.getGraphName()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java checkShardState(partition, stats); } // statistics partitionMeta.updatePartitionStats(stats.toBuilder() .setTimestamp(System.currentTimeMillis()).build()); +======== + // } + // 统计信息 + partitionMeta.updatePartitionStats(stats.toBuilder() .setTimestamp(System.currentTimeMillis()).build()); + } + + private boolean isShardEquals(List list1, List list2) { + return SetUtils.isEqualSet(list1, list2); + } + + private Long getLeader(Metapb.ShardGroup group) { + for (var shard : group.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + return shard.getStoreId(); + } + } + return null; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } /** @@ -968,15 +1393,19 @@ public void partitionHeartbeat(Metapb.PartitionStats stats) throws PDException { * * @param stats */ - private void checkShardState(Metapb.Partition partition, Metapb.PartitionStats stats) { + private void checkShardState(Metapb.ShardGroup shardGroup, Metapb.PartitionStats stats) { try { + Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; + int offCount = 0; + for (Metapb.ShardStats shard : stats.getShardStatsList()) { if (shard.getState() == Metapb.ShardState.SState_Offline) { offCount++; } } +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (partition.getState() != Metapb.PartitionState.PState_Offline) { if (offCount == 0) { updatePartitionState(partition.getGraphName(), partition.getId(), @@ -988,10 +1417,39 @@ private void checkShardState(Metapb.Partition partition, Metapb.PartitionStats s updatePartitionState(partition.getGraphName(), partition.getId(), Metapb.PartitionState.PState_Warn); } +======== + + if (offCount > 0 && offCount * 2 < stats.getShardStatsCount()) { + state = Metapb.PartitionState.PState_Warn; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } +// if (offCount == 0) { +// state = Metapb.PartitionState.PState_Normal; +// } else if (offCount * 2 < stats.getShardStatsCount()) { +// state = Metapb.PartitionState.PState_Warn; +// } + + if (shardGroup.getState() != state) { + // 更新graph state + for (var graph : getGraphs(shardGroup.getId())) { + if (graph.getState() != state) { + updateGraphState(graph.getGraphName(), state); + } + } + + storeService.updateShardGroupState(shardGroup.getId(), state); + } + +// if (partition.getState() != state) { +// updatePartitionState(partition.getGraphName(), partition.getId(), state); +// } } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.error("Partition {}-{} checkShardState exception {}", partition.getGraphName(), partition.getId(), e); +======== + log.error("checkShardState {} failed, error: ", shardGroup.getId(), e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } } @@ -1008,10 +1466,16 @@ public void addStatusListener(PartitionStatusListener listener) { * * @param changeType */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java protected void fireChangeShard(Metapb.Partition partition, List shards, 
ConfChangeType changeType) { log.info("fireChangeShard partition: {}-{}, changeType: {} {}", partition.getGraphName(), partition.getId(), changeType, shards); +======== + protected void fireChangeShard(Metapb.Partition partition, List shards, ConfChangeType changeType) { + log.info("fireChangeShard partition: {}-{}, changeType:{} {}", + partition.getGraphName(), partition.getId(), changeType, shards); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java instructionListeners.forEach(cmd -> { try { cmd.changeShard(partition, ChangeShard.newBuilder() @@ -1025,7 +1489,7 @@ protected void fireChangeShard(Metapb.Partition partition, List sh public void changeShard(int groupId, List shards) throws PDException { var partitions = getPartitionById(groupId); - if (partitions.size() == 0) { + if (partitions.isEmpty()) { return; } fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); @@ -1124,10 +1588,16 @@ public synchronized void handleMoveTask(MetaTask.Task task) throws PDException { .getId(), partition.getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.info("report move task, graph:{}, pid : {}->{}, state: {}", task.getPartition().getGraphName(), task.getPartition().getId(), task.getMovePartition().getTargetPartition().getId(), task.getState()); +======== + log.info("report move task (id: {}), graph:{}, pid : {}->{}, state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), + task.getMovePartition().getTargetPartition().getId(), task.getState()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // HAS BEEN PROCESSED(There is it in front) if (pdMetaTask != null) { @@ -1251,12 +1721,18 @@ private void handleMoveTaskAllSuccess(List subTasks, String graph } }); - partitionMeta.reload(); + partitionMeta.loadGraph(graphName); // renewal 
graph partition count var graph = getGraph(graphName).toBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java .setPartitionCount(targetPartitionIds.size()) .build(); +======== + .setPartitionCount(targetPartitionIds.size()) + .setState(Metapb.PartitionState.PState_Normal) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java updateGraph(graph); // The transaction is complete @@ -1306,7 +1782,13 @@ private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) updatePartitionState(targetPartition.getGraphName(), targetPartition.getId(), Metapb.PartitionState.PState_Normal); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java // Clean up the task list +======== + + updateGraphState(graphName, Metapb.PartitionState.PState_Normal); + // 清理掉任务列表 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java taskInfoMeta.removeMoveTaskPrefix(graphName); } @@ -1315,6 +1797,7 @@ private void handleMoveTaskIfFailed(String graphName, TaskInfoMeta taskInfoMeta) * * @param task clean task */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public void handleCleanPartitionTask(MetaTask.Task task) { log.info("clean task {} -{}, key range:{}~{}, report: {}", task.getPartition().getGraphName(), @@ -1322,11 +1805,26 @@ public void handleCleanPartitionTask(MetaTask.Task task) { task.getCleanPartition().getKeyStart(), task.getCleanPartition().getKeyEnd(), task.getState() +======== + public void handleCleanPartitionTask(MetaTask.Task task){ + log.info("clean task (id: {}) {} -{}, key range:{}~{}, report: {}", task.getId(), + task.getPartition().getGraphName(), + task.getPartition().getId(), + task.getCleanPartition().getKeyStart(), + task.getCleanPartition().getKeyEnd(), + task.getState() +>>>>>>>> 
d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java ); // If it fails, try again? } + public void handleBuildIndexTask(MetaTask.Task task) throws PDException { + log.info("build index task (id: {}), {} -{} , report state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); + storeService.getTaskInfoMeta().updateUserTask(task); + } + public synchronized void handleSplitTask(MetaTask.Task task) throws PDException { var taskInfoMeta = storeService.getTaskInfoMeta(); @@ -1335,9 +1833,14 @@ public synchronized void handleSplitTask(MetaTask.Task task) throws PDException MetaTask.Task pdMetaTask = taskInfoMeta.getSplitTask(partition.getGraphName(), partition.getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.info("report split task, graph:{}, pid : {}, state: {}", task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); +======== + log.info("report split task (id: {}), graph:{}, pid : {}, state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (pdMetaTask != null) { var newTask = pdMetaTask.toBuilder().setState(task.getState()).build(); @@ -1417,16 +1920,24 @@ private void handleSplitTaskAllSuccess(List subTasks, String grap } updatePartition(partitions); - partitionMeta.reload(); + partitionMeta.loadGraph(graphName); var graph = getGraph(graphName); + var storeGroupId = getStoreGroupByGraph(graphName); + int partitionCount = storeService.getShardGroups(storeGroupId).size(); // set partition count +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java if (pdConfig.getConfigService().getPartitionCount() != storeService.getShardGroups().size()) { 
pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size()); log.info("set the partition count of config server to {}", storeService.getShardGroups().size()); +======== + if (pdConfig.getConfigService().getPartitionCount(storeGroupId) != partitionCount) { + pdConfig.getConfigService().setPartitionCount(storeGroupId, partitionCount); + log.info("set the partition count of config server to {}", partitionCount); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } // renewal graph partition count @@ -1466,10 +1977,23 @@ private void handleSplitTaskIfFailed(List subTasks, String graphN taskInfoMeta.removeSplitTaskPrefix(graphName); } + + public void handleBackupGraphTask(MetaTask.Task task) throws PDException { + log.info("backup graph task (id: {}), {} -{} , report state: {}", task.getId(), + task.getPartition().getGraphName(), task.getPartition().getId(), task.getState()); + storeService.getTaskInfoMeta().updateUserTask(task); + } + /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java * todo : What is the impact of partition changes?? * Received a message that the leader has changed * Update the status of the graph and trigger a partition change +======== + * todo : partition 变更的影响?? 
+ * 接收到Leader改变的消息 + * 更新图状态,触发分区变更 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java */ protected void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { log.info("onPartitionChanged partition: {}", partition); @@ -1509,10 +2033,11 @@ public void onRaftLeaderChanged() { try { partitionMeta.reload(); } catch (PDException e) { - log.error("Partition meta reload exception {}", e); + log.error("Partition meta reload exception ", e); } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public void onPartitionStateChanged(String graph, int partId, Metapb.PartitionState state) throws PDException { updatePartitionState(graph, partId, state); @@ -1525,15 +2050,27 @@ public void onShardStateChanged(String graph, int partId, Metapb.PartitionState /** * Send rocksdb compaction message * +======== + /** + * 发送rocksdb compaction 消息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java * @param partId * @param tableName */ - public void fireDbCompaction(int partId, String tableName) { + public void fireDbCompaction(int partId, String tableName) throws PDException { - try { + // try { for (Metapb.Graph graph : getGraphs()) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partId); +======== + Metapb.Partition partition = partitionMeta.getPartitionById(graph.getGraphName(), partId); + // some graphs may doesn't have such partition + if (partition == null) { + continue; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java DbCompaction dbCompaction = DbCompaction.newBuilder() .setTableName(tableName) @@ -1541,18 +2078,39 @@ public void fireDbCompaction(int partId, String tableName) { 
instructionListeners.forEach(cmd -> { try { cmd.dbCompaction(partition, dbCompaction); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java } catch (Exception e) { +======== + log.info("compact partition: {}", partId); + }catch (Exception e){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java log.error("firedbCompaction", e); } }); + break; } - } catch (PDException e) { - e.printStackTrace(); - } + // } catch (PDException e) { + // e.printStackTrace(); + // } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java public void updateShardGroupCache(Metapb.ShardGroup group) { partitionMeta.getPartitionCache().updateShardGroup(group); +======== + public void updateShardGroupCache(Metapb.ShardGroup group){ + partitionMeta.updateShardGroupCache(group); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/PartitionService.java + } + + public Map getShardGroupCache() { + return partitionMeta.getShardGroupCache(); } + + private Integer getStoreGroupByGraph(String graphName) throws PDException { + Metapb.Graph graph = getGraph(graphName); + return graph.getStoreGroupId(); + } + } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java index 54ff6b6e8d..29f802bcdb 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,16 @@ */ package org.apache.hugegraph.pd; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreMonitorDataService.java import java.time.Instant; import java.time.LocalDateTime; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java index 9ca248022c..71f4f6c8d9 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,17 +16,50 @@ * limitations under the License. 
*/ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java package org.apache.hugegraph.pd; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Random; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== +import java.util.stream.Collectors; + +import org.apache.commons.lang3.StringUtils; + +import org.apache.hugegraph.pd.common.Consts; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.GraphMode; +import org.apache.hugegraph.pd.grpc.Metapb.GraphModeReason; +import org.apache.hugegraph.pd.grpc.Metapb.GraphState; +import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.pulse.ConfChangeType; +import org.apache.hugegraph.pd.listener.ShardGroupStatusListener; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.StoreInfoMeta; +import org.apache.hugegraph.pd.meta.TaskInfoMeta; +import com.google.gson.Gson; + +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java import org.apache.commons.lang3.StringUtils; import org.apache.hugegraph.pd.common.KVPair; @@ -53,6 +87,26 @@ @Slf4j public class StoreNodeService { +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== + // Store状态监听 + private List statusListeners; + private List shardGroupStatusListeners; + + private PartitionService partitionService; + @Getter + private StoreInfoMeta storeInfoMeta; + @Getter + private TaskInfoMeta taskInfoMeta; + private Random random = new Random(System.currentTimeMillis()); + private Map clusterStats = new ConcurrentHashMap<>(); + private KvService kvService; + private ConfigService configService; + private PDConfig pdConfig; + private static Metapb.ClusterStats statsNotReady = + Metapb.ClusterStats.newBuilder().setState(Metapb.ClusterState.Cluster_Not_Ready).build(); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java private static final Long STORE_HEART_BEAT_INTERVAL = 30000L; private static final String graphSpaceConfPrefix = "HUGEGRAPH/hg/GRAPHSPACE/CONF/"; private final List statusListeners; @@ -80,6 +134,7 @@ public StoreNodeService(PDConfig config) { storeInfoMeta = MetadataFactory.newStoreInfoMeta(pdConfig); taskInfoMeta = MetadataFactory.newTaskInfoMeta(pdConfig); shardGroupStatusListeners = Collections.synchronizedList(new ArrayList<>()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java statusListeners = Collections.synchronizedList(new ArrayList()); clusterStats = Metapb.ClusterStats.newBuilder() .setState(Metapb.ClusterState.Cluster_Not_Ready) @@ -87,10 +142,32 @@ public StoreNodeService(PDConfig config) { .build(); kvService = new KvService(pdConfig); configService = new ConfigService(pdConfig); +======== + statusListeners = Collections.synchronizedList(new ArrayList<>()); + configService = new ConfigService(pdConfig); + kvService = new KvService(pdConfig); + + try { + for (var group: configService.getAllStoreGroup()) { + clusterStats.put(group.getGroupId(), getDefaultClusterStats()); + } + } catch (PDException e) { + 
log.error("init exception: ", e); + } + + } + + private Metapb.ClusterStats getDefaultClusterStats() { + return Metapb.ClusterStats.newBuilder() + .setState(Metapb.ClusterState.Cluster_Not_Ready) + .setTimestamp(System.currentTimeMillis()) + .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } public void init(PartitionService partitionService) { this.partitionService = partitionService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java partitionService.addStatusListener(new PartitionStatusListener() { @Override public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { @@ -123,6 +200,40 @@ public void onPartitionRemoved(Metapb.Partition partition) { } }); +======== +// partitionService.addStatusListener(new PartitionStatusListener() { +// @Override +// public void onPartitionChanged(Metapb.Partition old, Metapb.Partition partition) { +// if (old != null && old.getState() != partition.getState()) { +// // 状态改变,重置集群状态 +// try { +// List partitions = partitionService.getPartitionById(partition.getId()); +// Metapb.PartitionState state = Metapb.PartitionState.PState_Normal; +// for(Metapb.Partition pt : partitions){ +// if ( pt.getState().getNumber() > state.getNumber()) { +// state = pt.getState(); +// } +// } +// updateShardGroupState(partition.getId(), state); +// +// for(Metapb.ShardGroup group : getShardGroups()){ +// if ( group.getState().getNumber() > state.getNumber()) +// state = group.getState(); +// } +// +// updateClusterStatus(state); +// } catch (PDException e) { +// log.error("onPartitionChanged exception: ", e); +// } +// } +// } +// +// @Override +// public void onPartitionRemoved(Metapb.Partition partition) { +// +// } +// }); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } /** @@ -130,9 +241,18 @@ public void onPartitionRemoved(Metapb.Partition 
partition) { * * @return */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public boolean isOK() { return this.clusterStats.getState().getNumber() < Metapb.ClusterState.Cluster_Offline.getNumber(); +======== + public boolean isOK(int storeGroup){ + if (! this.clusterStats.containsKey(storeGroup)) { + return false; + } + return this.clusterStats.get(storeGroup).getState().getNumber() < + Metapb.ClusterState.Cluster_Offline.getNumber(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } /** @@ -148,28 +268,46 @@ public Metapb.Store register(Metapb.Store store) throws PDException { } if (!storeInfoMeta.storeExists(store.getId())) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java log.error("Store id {} does not belong to this PD, address = {}", store.getId(), store.getAddress()); // storeId does not exist, an exception is thrown throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, String.format("Store id %d doest not exist.", store.getId())); +======== + log.error("Store id {} does not belong to this PD, address = {}", store.getId(), store.getAddress()); + // storeId不存在,抛出异常 + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d doest not exist.", store.getId())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } // If the store status is Tombstone, the registration is denied. 
Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); if (lastStore.getState() == Metapb.StoreState.Tombstone) { log.error("Store id {} has been removed, Please reinitialize, address = {}", +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java store.getId(), store.getAddress()); // storeId does not exist, an exception is thrown throw new PDException(Pdpb.ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, String.format("Store id %d has been removed. %s", store.getId(), store.getAddress())); +======== + store.getId(), store.getAddress()); + // storeId不存在,抛出异常 + throw new PDException(ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store id %d has been removed. %s", store.getId(), store.getAddress())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } // offline or up, or in the initial activation list, go live automatically Metapb.StoreState storeState = lastStore.getState(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up || inInitialStoreList(store)) { +======== + if (storeState == Metapb.StoreState.Offline || storeState == Metapb.StoreState.Up || inInitialStoreList(store)){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java storeState = Metapb.StoreState.Up; } else { storeState = Metapb.StoreState.Pending; @@ -188,6 +326,7 @@ public Metapb.Store register(Metapb.Store store) throws PDException { long current = System.currentTimeMillis(); boolean raftChanged = false; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // On-line status Raft Address there has been a change if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && storeState == Metapb.StoreState.Up) { @@ -199,6 +338,16 @@ public 
Metapb.Store register(Metapb.Store store) throws PDException { store.getId(), store.getAddress())); } else if (current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2) { // It is considered that a change has occurred +======== + // 上线状态的Raft Address 发生了变更 + if (!Objects.equals(lastStore.getRaftAddress(), store.getRaftAddress()) && storeState == Metapb.StoreState.Up) { + // 时间间隔太短,而且raft有变更,则认为是无效的store + if (current - lastStore.getLastHeartbeat() < STORE_HEART_BEAT_INTERVAL * 0.8){ + throw new PDException(ErrorType.STORE_PROHIBIT_DUPLICATE_VALUE, + String.format("Store id %d may be duplicate. addr: %s", store.getId(), store.getAddress())); + } else if(current - lastStore.getLastHeartbeat() > STORE_HEART_BEAT_INTERVAL * 1.2 ) { + // 认为发生了变更 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java raftChanged = true; } else { // Wait for the next registration @@ -206,13 +355,23 @@ public Metapb.Store register(Metapb.Store store) throws PDException { } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // Store information +======== + // 兼容性处理,如果在初始化列表中,则自动插入storeGroup + if (inInitialStoreList(store) && ! 
isStoreHasStoreGroup(store.getId())) { + int groupId = this.pdConfig.getInitialStoreGroup(store.getAddress()); + updateStoreGroupRelation(store.getId(), groupId); + } + + // 存储store信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java storeInfoMeta.updateStore(store); if (storeState == Metapb.StoreState.Up) { // Update the store active status storeInfoMeta.keepStoreAlive(store); onStoreStatusChanged(store, Metapb.StoreState.Offline, Metapb.StoreState.Up); - checkStoreStatus(); + checkStoreStatus(storeInfoMeta.getStoreGroupByStoreId(store.getId())); } // Wait for the store information to be saved before sending the changes @@ -237,7 +396,11 @@ private boolean inInitialStoreList(Metapb.Store store) { */ private synchronized Metapb.Store newStoreNode(Metapb.Store store) throws PDException { long id = random.nextLong() & Long.MAX_VALUE; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java while (id == 0 || storeInfoMeta.storeExists(id)) { +======== + while( id == 0 || storeInfoMeta.storeExists(id) ) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java id = random.nextLong() & Long.MAX_VALUE; } store = Metapb.Store.newBuilder(store) @@ -257,9 +420,15 @@ private synchronized Metapb.Store newStoreNode(Metapb.Store store) throws PDExce */ public Metapb.Store getStore(long id) throws PDException { Metapb.Store store = storeInfoMeta.getStore(id); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java if (store == null) { throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, String.format("Store id %x doest not exist.", id)); +======== + if ( store == null ) { + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %x doest not exist.", id)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } return store; } @@ -268,22 +437,38 @@ public Metapb.Store getStore(long id) throws PDException { * Update the store information, detect the change of store status, and notify Hugestore */ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDException { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java log.info("updateStore storeId: {}, address: {}, state: {}", store.getId(), store.getAddress(), store.getState()); +======== + log.info("updateStore storeId: {}, address: {}, state: {}", + store.getId(), store.getAddress(), store.getState()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java Metapb.Store lastStore = storeInfoMeta.getStore(store.getId()); if (lastStore == null) { return null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); +======== + + Metapb.Store.Builder builder = Metapb.Store.newBuilder(lastStore).clearLabels().clearStats(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java store = builder.mergeFrom(store).build(); if (store.getState() == Metapb.StoreState.Tombstone) { - List activeStores = getStores(); + List activeStores = getStores(getStoreInfoMeta().getStoreGroupByStoreId(store.getId())); if (lastStore.getState() == Metapb.StoreState.Up +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, "The number of active stores is less then " + pdConfig.getMinStoreCount()); +======== + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new 
PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } } @@ -297,6 +482,7 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept return lastStore; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java List activeStores = this.getActiveStores(); Map storeMap = new HashMap<>(); activeStores.forEach(s -> { @@ -305,6 +491,19 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept // If the store is offline, delete it directly from active, and if the store is // online, temporarily delete it from active, and then delete it when the status // is set to Tombstone +======== +// List activeStores = this.getActiveStores(getStoreInfoMeta().getStoreGroup(store.getId())); +// Map storeMap = new HashMap<>(); +// activeStores.forEach(s -> { +// storeMap.put(s.getId(), s); +// }); + + var storeMap = getActiveStoresByStoreGroup(getStoreInfoMeta().getStoreGroupByStoreId(store.getId())) + .stream() + .collect(Collectors.toMap(Metapb.Store::getId, store1 -> store1)); + + //如果store已经离线,直接从活跃中删除,如果store在线,暂时不从活跃中删除,等把状态置成Tombstone的时候再删除 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java if (!storeMap.containsKey(store.getId())) { log.info("updateStore removeActiveStores store {}", store.getId()); storeInfoMeta.removeActiveStore(store); @@ -322,7 +521,7 @@ public synchronized Metapb.Store updateStore(Metapb.Store store) throws PDExcept storeTurnoff(store); } else if (store.getState() == Metapb.StoreState.Up) { storeInfoMeta.keepStoreAlive(store); - checkStoreStatus(); + checkStoreStatus(storeInfoMeta.getStoreGroupByStoreId(store.getId())); } onStoreStatusChanged(lastStore, lastStore.getState(), store.getState()); } @@ -355,12 +554,29 @@ public 
synchronized void storeTurnoff(Metapb.Store store) throws PDException { * * @throws PDException */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public List getStores() throws PDException { return storeInfoMeta.getStores(null); } public List getStores(String graphName) throws PDException { return storeInfoMeta.getStores(graphName); +======== + public List getStores() throws PDException{ + return storeInfoMeta.getAllStores(); + } + + public List getStores(String graphName) throws PDException { + Metapb.Graph graph = partitionService.getGraph(graphName); + return graph == null ? getStores() : getStoresByStoreGroup(graph.getStoreGroupId()); + } + + public List getStores(int storeGroupId) throws PDException{ + Set set = storeInfoMeta.getStoreIdsByGroup(storeGroupId); + return storeInfoMeta.getAllStores().stream() + .filter(store -> set.contains(store.getId())) + .collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } public List getStoreStatus(boolean isActive) throws PDException { @@ -371,6 +587,18 @@ public List getShardGroups() throws PDException { return storeInfoMeta.getShardGroups(); } + public List getShardGroups(int storeGroup) throws PDException { + Set storeIds = storeInfoMeta.getStoreIdsByGroup(storeGroup); + return storeInfoMeta.getShardGroups().stream().filter(shardGroup -> { + for (var shard : shardGroup.getShardsList()) { + if (storeIds.contains(shard.getStoreId())) { + return true; + } + } + return false; + }).collect(Collectors.toList()); + } + public Metapb.ShardGroup getShardGroup(int groupId) throws PDException { return storeInfoMeta.getShardGroup(groupId); } @@ -403,13 +631,31 @@ public List getShardGroupsByStore(long storeId) throws PDExce * @throws PDException */ public List getActiveStores(String graphName) throws PDException { - return storeInfoMeta.getActiveStores(graphName); + // todo: + Metapb.Graph 
graph = partitionService.getGraph(graphName); + return graph == null ? List.of() : getActiveStoresByStoreGroup(graph.getStoreGroupId()); } public List getActiveStores() throws PDException { return storeInfoMeta.getActiveStores(); } + public List getActiveStoresByStoreGroup(int storeGroupId) throws PDException { + Set ids = storeInfoMeta.getStoreIdsByGroup(storeGroupId); + return storeInfoMeta.getActiveStores() + .stream() + .filter(store -> ids.contains(store.getId())) + .collect(Collectors.toList()); + } + + public List getActiveStoresByPartition(int partitionId) throws PDException { + var shardGroup = getShardGroup(partitionId); + if (shardGroup != null) { + return shardGroup.getShardsList().stream().map(Metapb.Shard::getStoreId).collect(Collectors.toList()); + } + return List.of(); + } + public List getTombStores() throws PDException { List stores = new ArrayList<>(); for (Metapb.Store store : this.getStores()) { @@ -425,11 +671,17 @@ public long removeStore(Long storeId) throws PDException { } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java * todo : New logic * Assign a store to the partition and decide how many peers to allocate according to the * configuration of the graph * After allocating all the shards, save the ShardGroup object (store does not change, only * executes once) +======== + * todo : 新逻辑 + * 给partition分配store,根据图的配置,决定分配几个peer + * 分配完所有的shards,保存ShardGroup对象(store不变动,只执行一次) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java */ public synchronized List allocShards(Metapb.Graph graph, int partId) throws PDException { @@ -437,6 +689,7 @@ public synchronized List allocShards(Metapb.Graph graph, int partI // The number of partitions can be set based on the size of the data, but the total // number cannot exceed the number of raft groups if (storeInfoMeta.getShardGroup(partId) == null) { +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // Get active store key List stores = storeInfoMeta.getActiveStores(); @@ -449,8 +702,24 @@ public synchronized List allocShards(Metapb.Graph graph, int partI throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, "The number of active stores is less then " + pdConfig.getMinStoreCount()); +======== + // 获取活跃的store key + // 根据 partionID计算store + List stores = storeInfoMeta.getActiveStores(graph.getStoreGroupId()); + + if (stores.isEmpty()) { + throw new PDException(ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); } + var minStoreCount = Math.max(pdConfig.getMinStoreCount(), configService.getPDConfig().getShardCount()); + + if (stores.size() < minStoreCount) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + minStoreCount); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + // todo: 根据graph 配置的store group,获取 shard count int shardCount = pdConfig.getPartition().getShardCount(); shardCount = Math.min(shardCount, stores.size()); // Two shards could not elect a leader @@ -460,11 +729,20 @@ public synchronized List allocShards(Metapb.Graph graph, int partI shardCount = 1; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // All ShardGroups are created at one time to ensure that the initial groupIDs are // orderly and easy for humans to read for (int groupId = 0; groupId < pdConfig.getConfigService().getPartitionCount(); groupId++) { int storeIdx = groupId % stores.size(); // Assignment rules, simplified to modulo +======== + // todo: 获取partition count by group + var partitionCount = pdConfig.getConfigService().getPartitionCount(graph.getStoreGroupId()); + int baseId = partId / Consts.PARTITION_GAP * Consts.PARTITION_GAP; + // 一次创建完所有的ShardGroup,保证初始的groupID有序,方便人工阅读 + for (int groupId = 
0; groupId >>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java List shards = new ArrayList<>(); for (int i = 0; i < shardCount; i++) { Metapb.Shard shard = @@ -477,22 +755,30 @@ public synchronized List allocShards(Metapb.Graph graph, int partI } Metapb.ShardGroup group = Metapb.ShardGroup.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java .setId(groupId) .setState( Metapb.PartitionState.PState_Normal) .addAllShards(shards).build(); +======== + .setId(groupId + baseId) + .setState(Metapb.PartitionState.PState_Normal) + .addAllShards(shards).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // new group storeInfoMeta.updateShardGroup(group); - partitionService.updateShardGroupCache(group); - onShardGroupStatusChanged(group, group); - log.info("alloc shard group: id {}", groupId); + updateShardGroupCache(group); + onShardGroupStatusChanged(null, group); + log.info("alloc shard group: id {}", groupId + baseId); } } + return storeInfoMeta.getShardGroup(partId).getShardsList(); } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java * Based on the shard_count of the graph, reallocate shards * Send change shard */ @@ -509,8 +795,28 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, "The number of active stores is less then " + pdConfig.getMinStoreCount()); +======== + * todo : 新逻辑 + * 根据graph的shard_count,重新分配shard + * 发送变更change shard指令 + */ + public synchronized List reallocShards(Metapb.ShardGroup shardGroup) throws PDException { + // todo:检查 shard group在哪个store group里面, 以及 shard group 对应的partition count + // todo: store group 内部分组 + int storeGroup = getShardGroupBelongsToStoreGroup(shardGroup); + List stores = storeInfoMeta.getActiveStores(storeGroup); + + 
if (stores.isEmpty()) { + throw new PDException(ErrorType.NO_ACTIVE_STORE_VALUE, "There is no any online store"); } + if (stores.size() < pdConfig.getMinStoreCount()) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java + } + + // todo: check partition count by store group int shardCount = pdConfig.getPartition().getShardCount(); shardCount = Math.min(shardCount, stores.size()); if (shardCount == 2 || shardCount < 1) { @@ -519,8 +825,7 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou shardCount = 1; } - List shards = new ArrayList<>(); - shards.addAll(shardGroup.getShardsList()); + List shards = new ArrayList<>(shardGroup.getShardsList()); if (shardCount > shards.size()) { // Need to add shards @@ -559,12 +864,12 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou .clearShards() .addAllShards(shards).build(); storeInfoMeta.updateShardGroup(group); - partitionService.updateShardGroupCache(group); + updateShardGroupCache(group); // change shard group - onShardGroupStatusChanged(shardGroup, group); + // onShardGroupStatusChanged(shardGroup, group); var partitions = partitionService.getPartitionById(shardGroup.getId()); - if (partitions.size() > 0) { + if (!partitions.isEmpty()) { // send one message, change shard is regardless with partition/graph partitionService.fireChangeShard(partitions.get(0), shards, ConfChangeType.CONF_CHANGE_TYPE_ADJUST); @@ -580,6 +885,7 @@ public synchronized List reallocShards(Metapb.ShardGroup shardGrou * @param groups list of (partition id, count) * @return total groups */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public synchronized int splitShardGroups(List> groups) throws PDException { int sum = groups.stream().map(pair -> 
pair.getValue()).reduce(0, Integer::sum); @@ -587,24 +893,57 @@ public synchronized int splitShardGroups(List> groups) if (sum > getActiveStores().size() * pdConfig.getPartition().getMaxShardsPerStore()) { throw new PDException(Pdpb.ErrorType.Too_Many_Partitions_Per_Store_VALUE, "can't satisfy target shard group count"); +======== + public synchronized int splitShardGroups(List> groups) throws PDException { + // 1. 检查所有的groups是否属于一个 store group + Set storeGroups = new HashSet<>(); + + for (var group : groups) { + var shardGroup = storeInfoMeta.getShardGroup(group.getKey()); + if (shardGroup == null){ + throw new PDException(ErrorType.SHARD_GROUPS_NOT_EXISTS); + } + storeGroups.add(getShardGroupBelongsToStoreGroup(shardGroup)); + } + assert storeGroups.size() == 1; + + int storeGroup = storeGroups.iterator().next(); + int sum = groups.stream().map(KVPair::getValue).reduce(0, Integer::sum); + + // 2. 检查split后的count, 增加的 + 原有的 + int newCount = (sum - groups.size()) + getShardGroups(storeGroup).size(); + + // shard group 太大 + if (newCount > getActiveStoresByStoreGroup(storeGroup).size() * pdConfig.getPartition().getMaxShardsPerStore()){ + throw new PDException(ErrorType.Too_Many_Partitions_Per_Store); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } partitionService.splitPartition(groups); - return sum; + return newCount; } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java * Alloc shard group, prepare for the split * +======== + * 分配shard group,为分裂做准备 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java * @param * @return true * @throws PDException */ private boolean isStoreInShards(List shards, long storeId) { AtomicBoolean exist = new AtomicBoolean(false); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java shards.forEach(s -> { if (s.getStoreId() 
== storeId) { +======== + shards.forEach(s->{ + if (s.getStoreId() == storeId ) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java exist.set(true); } }); @@ -622,8 +961,12 @@ private boolean isStoreInShards(List shards, long storeId) { * @return */ public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List shards, +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java long version, long confVersion) throws PDException { +======== + long version, long confVersion) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); if (group == null) { @@ -631,6 +974,7 @@ public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List= 0) { builder.setVersion(version); } @@ -645,6 +989,22 @@ public synchronized Metapb.ShardGroup updateShardGroup(int groupId, List= 0){ + builder.setVersion(version); + } + + if (confVersion >= 0){ + builder.setConfVer(confVersion); + } + + var newGroup = builder.clearShards() .addAllShards(shards) .build(); + + storeInfoMeta.updateShardGroup(newGroup); + updateShardGroupCache(newGroup); + onShardGroupStatusChanged(group, newGroup); + // log.info("Raft {} updateShardGroup {}", groupId, newGroup); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java return group; } @@ -663,7 +1023,7 @@ public void shardGroupOp(int groupId, List shards) throws PDExcept } var newGroup = shardGroup.toBuilder().clearShards().addAllShards(shards).build(); - if (shards.size() == 0) { + if (shards.isEmpty()) { var partitions = partitionService.getPartitionById(groupId); for (var partition : partitions) { partitionService.removePartition(partition.getGraphName(), groupId); @@ -681,12 +1041,15 @@ public void shardGroupOp(int groupId, List shards) 
throws PDExcept */ public synchronized void deleteShardGroup(int groupId) throws PDException { Metapb.ShardGroup group = this.storeInfoMeta.getShardGroup(groupId); + int storeGroupId = getShardGroupBelongsToStoreGroup(group); + if (group != null) { storeInfoMeta.deleteShardGroup(groupId); } onShardGroupStatusChanged(group, null); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // Fix the number of partitions for the store. (Result from partition merge) var shardGroups = getShardGroups(); if (shardGroups != null) { @@ -695,10 +1058,33 @@ public synchronized void deleteShardGroup(int groupId) throws PDException { getShardGroups().stream().map(Metapb.ShardGroup::getId).max(Integer::compareTo); if (maxGroupId.get() < count1) { pdConfig.getConfigService().setPartitionCount(maxGroupId.get() + 1); +======== + // 修正store的分区数. (分区合并导致) + var shardGroups = getShardGroups(storeGroupId); + if (shardGroups != null) { + var count1 = pdConfig.getConfigService().getPartitionCount(storeGroupId); + var maxGroupId = getShardGroups(storeGroupId) + .stream().map(Metapb.ShardGroup::getId) + .max(Integer::compareTo); + // 考虑分组的影响 + var groupId2 = maxGroupId.get() % Consts.PARTITION_GAP; + if (groupId2 < count1) { + configService.setPartitionCount(storeGroupId, groupId2 + 1); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } } + + // 没有数据的partition,可能上报不上来 + var partitions = partitionService.getPartitions().stream() + .filter(partition -> partition.getId() == groupId) + .collect(Collectors.toList()); + + for (var partition : partitions) { + partitionService.removePartition(partition.getGraphName(), groupId); + } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws PDException { Metapb.ShardGroup shardGroup = 
storeInfoMeta.getShardGroup(groupId) @@ -706,6 +1092,32 @@ public synchronized void updateShardGroupState(int groupId, Metapb.PartitionStat .setState(state).build(); storeInfoMeta.updateShardGroup(shardGroup); partitionService.updateShardGroupCache(shardGroup); +======== + // todo : update cluster state + public synchronized void updateShardGroupState(int groupId, Metapb.PartitionState state) throws PDException { + Metapb.ShardGroup shardGroup = storeInfoMeta.getShardGroup(groupId); + + if (state != shardGroup.getState()) { + var newShardGroup = shardGroup.toBuilder().setState(state).build(); + storeInfoMeta.updateShardGroup(newShardGroup); + + updateShardGroupCache(newShardGroup); + + log.debug("update shard group {} state: {}", groupId, state); + } + + // 检查集群的状态 + // todo : 更明确的集群状态定义 + Metapb.PartitionState clusterState = Metapb.PartitionState.PState_None; + for(Metapb.ShardGroup group : getShardGroups()){ + if (group.getState().getNumber() > state.getNumber()) { + clusterState = group.getState(); + } + } + + var storeGroupId = getShardGroupBelongsToStoreGroup(shardGroup); + updateClusterStatus(storeGroupId, clusterState); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } /** @@ -717,6 +1129,7 @@ public synchronized void updateShardGroupState(int groupId, Metapb.PartitionStat public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDException { this.storeInfoMeta.updateStoreStats(storeStats); Metapb.Store lastStore = this.getStore(storeStats.getStoreId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java if (lastStore == null) { // store does not exist throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, @@ -742,30 +1155,50 @@ public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDExce // progress and you need to wait if (storeStats.getPartitionCount() > 0 && 
storeMap.containsKey(storeStats.getStoreId())) { +======== + if (lastStore == null){ + //store不存在 + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist.", storeStats.getStoreId())); + } + if (lastStore.getState() == Metapb.StoreState.Tombstone){ + throw new PDException(ErrorType.STORE_HAS_BEEN_REMOVED_VALUE, + String.format("Store id %d is useless since it's state is Tombstone", + storeStats.getStoreId())); + } + Metapb.Store nowStore; + // 如果正在做store下线操作 + if (lastStore.getState() == Metapb.StoreState.Exiting){ + var storeMap = this.getActiveStoresByStoreGroup(storeInfoMeta.getStoreGroupByStoreId(lastStore.getId())) + .stream().collect(Collectors.toMap(Metapb.Store::getId, store -> store)); + + // 下线的store的分区为0,说明已经迁移完毕,可以下线,如果非0,则迁移还在进行,需要等待 + if (storeStats.getPartitionCount() > 0 && storeMap.containsKey(storeStats.getStoreId())){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java nowStore = Metapb.Store.newBuilder(lastStore) .setStats(storeStats) .setLastHeartbeat(System.currentTimeMillis()) .setState(Metapb.StoreState.Exiting).build(); this.storeInfoMeta.updateStore(nowStore); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java return this.clusterStats; } else { +======== + }else { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java nowStore = Metapb.Store.newBuilder(lastStore) .setStats(storeStats) .setLastHeartbeat(System.currentTimeMillis()) .setState(Metapb.StoreState.Tombstone).build(); this.storeInfoMeta.updateStore(nowStore); storeInfoMeta.removeActiveStore(nowStore); - return this.clusterStats; } - } - - if (lastStore.getState() == Metapb.StoreState.Pending) { + } else if (lastStore.getState() == Metapb.StoreState.Pending) { nowStore = Metapb.Store.newBuilder(lastStore) .setStats(storeStats) .setLastHeartbeat(System.currentTimeMillis()) 
.setState(Metapb.StoreState.Pending).build(); this.storeInfoMeta.updateStore(nowStore); - return this.clusterStats; } else { if (lastStore.getState() == Metapb.StoreState.Offline) { this.updateStore( @@ -777,17 +1210,39 @@ public Metapb.ClusterStats heartBeat(Metapb.StoreStats storeStats) throws PDExce .setLastHeartbeat(System.currentTimeMillis()).build(); this.storeInfoMeta.updateStore(nowStore); this.storeInfoMeta.keepStoreAlive(nowStore); - this.checkStoreStatus(); - return this.clusterStats; + this.checkStoreStatus(storeInfoMeta.getStoreGroupByStoreId(lastStore.getId())); } + + return this.clusterStats.get(getStoreGroupByStore(lastStore.getId())); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public synchronized Metapb.ClusterStats updateClusterStatus(Metapb.ClusterState state) { this.clusterStats = clusterStats.toBuilder().setState(state).build(); return this.clusterStats; } public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state) { +======== + + public synchronized Metapb.ClusterStats updateClusterStatus(int storeGroupId, Metapb.ClusterState state) + throws PDException { + var stats = this.clusterStats.get(storeGroupId); + if (stats == null) { + var storeGroup = configService.getStoreGroup(storeGroupId); + if (storeGroup != null) { + this.clusterStats.put(storeGroupId, Metapb.ClusterStats.newBuilder().setState(state).build()); + } else { + throw new PDException(ErrorType.NOT_FOUND.getNumber(), "store group not exists"); + } + } else if (stats != null && stats.getState() != state) { + this.clusterStats.put(storeGroupId, stats.toBuilder().setState(state).build()); + } + return this.clusterStats.get(storeGroupId); + } + + public Metapb.ClusterStats updateClusterStatus(int storeGroupId, Metapb.PartitionState state) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java Metapb.ClusterState cstate = 
Metapb.ClusterState.Cluster_OK; switch (state) { case PState_Normal: @@ -802,12 +1257,28 @@ public Metapb.ClusterStats updateClusterStatus(Metapb.PartitionState state) { case PState_Offline: cstate = Metapb.ClusterState.Cluster_Offline; break; + default: + cstate = Metapb.ClusterState.Cluster_Not_Ready; } - return updateClusterStatus(cstate); + return updateClusterStatus(storeGroupId, cstate); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public Metapb.ClusterStats getClusterStats() { return this.clusterStats; +======== + public Metapb.ClusterStats getClusterStats(int storeGroup) { + return this.clusterStats.getOrDefault(storeGroup, statsNotReady); + } + + public Metapb.ClusterStats getClusterStats(long storeId) throws PDException { + return this.clusterStats.getOrDefault(storeInfoMeta.getStoreGroupByStoreId(storeId), statsNotReady); + } + + public Map getAllClusterStats() { + return this.clusterStats.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getState())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } /** @@ -815,12 +1286,12 @@ public Metapb.ClusterStats getClusterStats() { * Whether the number of active machines is greater than the minimum threshold * The number of partition shards online has exceeded half */ - public synchronized void checkStoreStatus() { + public synchronized void checkStoreStatus(int storeGroup) { Metapb.ClusterStats.Builder builder = Metapb.ClusterStats.newBuilder() .setState( Metapb.ClusterState.Cluster_OK); try { - List activeStores = this.getActiveStores(); + List activeStores = this.getActiveStoresByStoreGroup(storeGroup); if (activeStores.size() < pdConfig.getMinStoreCount()) { builder.setState(Metapb.ClusterState.Cluster_Not_Ready); builder.setMessage("The number of active stores is " + activeStores.size() @@ -833,8 +1304,13 @@ public synchronized void 
checkStoreStatus() { }); if (builder.getState() == Metapb.ClusterState.Cluster_OK) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // Check whether the number of online shards for each partition is greater than half for (Metapb.ShardGroup group : this.getShardGroups()) { +======== + // 检查每个分区的在线shard数量是否大于半数 + for (Metapb.ShardGroup group : this.getShardGroups(storeGroup)) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java int count = 0; for (Metapb.Shard shard : group.getShardsList()) { count += storeMap.containsKey(shard.getStoreId()) ? 1 : 0; @@ -849,10 +1325,17 @@ public synchronized void checkStoreStatus() { } } catch (PDException e) { - log.error("StoreNodeService updateClusterStatus exception {}", e); + log.error("StoreNodeService updateClusterStatus exception", e); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java this.clusterStats = builder.setTimestamp(System.currentTimeMillis()).build(); if (this.clusterStats.getState() != Metapb.ClusterState.Cluster_OK) { +======== + + this.clusterStats.put(storeGroup, builder.setTimestamp(System.currentTimeMillis()).build()); + + if (this.clusterStats.get(storeGroup).getState() != Metapb.ClusterState.Cluster_OK) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java log.error("The cluster is not ready, {}", this.clusterStats); } } @@ -862,8 +1345,12 @@ public void addStatusListener(StoreStatusListener listener) { } protected void onStoreRaftAddressChanged(Metapb.Store store) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java log.info("onStoreRaftAddressChanged storeId = {}, new raft addr:", store.getId(), store.getRaftAddress()); +======== + log.info("onStoreRaftAddressChanged storeId = {}, new raft address: {}", store.getId(), 
store.getRaftAddress()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java statusListeners.forEach(e -> { e.onStoreRaftChanged(store); }); @@ -881,10 +1368,21 @@ protected void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, }); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup) { log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", group.getId(), group, newGroup); shardGroupStatusListeners.forEach(e -> e.onShardListChanged(group, newGroup)); +======== + protected void onShardGroupStatusChanged(Metapb.ShardGroup group, Metapb.ShardGroup newGroup){ + if (group == null && newGroup == null) { + return; + } + + var id = group == null ? newGroup.getId() : group.getId(); + log.info("onShardGroupStatusChanged, groupId: {}, from {} to {}", id, group, newGroup); + shardGroupStatusListeners.forEach( e -> e.onShardListChanged(group, newGroup)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } protected void onShardGroupOp(Metapb.ShardGroup shardGroup) { @@ -900,21 +1398,20 @@ protected void onShardGroupOp(Metapb.ShardGroup shardGroup) { */ public boolean checkStoreCanOffline(Metapb.Store currentStore) { try { - long currentStoreId = currentStore.getId(); - List activeStores = this.getActiveStores(); - Map storeMap = new HashMap<>(); - activeStores.forEach(store -> { - if (store.getId() != currentStoreId) { - storeMap.put(store.getId(), store); - } - }); + Map storeMap = getActiveStoresByStoreGroup(getStoreGroupByStore(currentStore.getId())) + .stream().collect(Collectors.toMap(Metapb.Store::getId, store -> store)); if (storeMap.size() < pdConfig.getMinStoreCount()) { return false; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java // 
Check whether the number of online shards for each partition is greater than half for (Metapb.ShardGroup group : this.getShardGroups()) { +======== + // 检查每个分区的在线shard数量是否大于半数 + for (Metapb.ShardGroup group : this.getShardGroups(getStoreGroupByStore(currentStore.getId()))) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java int count = 0; for (Metapb.Shard shard : group.getShardsList()) { long storeId = shard.getStoreId(); @@ -925,7 +1422,7 @@ public boolean checkStoreCanOffline(Metapb.Store currentStore) { } } } catch (PDException e) { - log.error("StoreNodeService checkStoreCanOffline exception {}", e); + log.error("StoreNodeService checkStoreCanOffline exception ", e); return false; } @@ -954,7 +1451,7 @@ public Map getQuota() throws PDException { for (Metapb.Graph g : graphs) { String graphName = g.getGraphName(); String[] splits = graphName.split(delimiter); - if (!graphName.endsWith("/g") || splits.length < 2) { + if (splits.length < 2) { continue; } String graphSpace = splits[0]; @@ -1011,7 +1508,7 @@ public Map getQuota() throws PDException { for (Metapb.Graph g : graphs) { String graphName = g.getGraphName(); String[] splits = graphName.split(delimiter); - if (!graphName.endsWith("/g") || splits.length < 2) { + if (splits.length < 2) { continue; } String graphSpace = splits[0]; @@ -1037,6 +1534,7 @@ public Map getQuota() throws PDException { return limits; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public Runnable getQuotaChecker() { return quotaChecker; } @@ -1048,6 +1546,19 @@ public TaskInfoMeta getTaskInfoMeta() { public StoreInfoMeta getStoreInfoMeta() { return storeInfoMeta; } +======== + + @Getter + private Runnable quotaChecker = () -> { + try { + getQuota(); + } catch (Exception e) { + log.error( + "obtaining and sending graph space quota information with error: ", + e); + } + }; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java /** * Get the leader of the partition @@ -1060,6 +1571,7 @@ public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) { Metapb.Shard leader = null; try { var shardGroup = this.getShardGroup(partition.getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java for (Metapb.Shard shard : shardGroup.getShardsList()) { if (shard.getRole() == Metapb.ShardRole.Leader) { leader = shard; @@ -1068,10 +1580,31 @@ public Metapb.Shard getLeader(Metapb.Partition partition, int initIdx) { } catch (Exception e) { log.error("get leader error: group id:{}, error: {}", partition.getId(), e.getMessage()); +======== + if (shardGroup != null) { + for (Metapb.Shard shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Leader) { + leader = shard; + } + } + } + }catch (Exception e){ + log.error("get leader error: group id:{}, error:", partition.getId(), e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } return leader; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== + private void updateShardGroupCache(Metapb.ShardGroup group) { + if (group == null || group.getShardsList().isEmpty()) { + return; + } + partitionService.updateShardGroupCache(group); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java public CacheResponse getCache() throws PDException { List stores = getStores(); @@ -1083,4 +1616,53 @@ public CacheResponse getCache() throws PDException { .build(); return cache; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java +======== + + public int getStoreGroupByStore(Metapb.Store store ) throws PDException { + return getStoreGroupByStore(store.getId()); + } + + /** + * get the store 
group id by the store id + * @param storeId store id + * @return group id + * @throws PDException + */ + public int getStoreGroupByStore(Long storeId) throws PDException { + return storeInfoMeta.getStoreGroupByStoreId(storeId); + } + + public boolean isStoreHasStoreGroup(long storeId) throws PDException { + return storeInfoMeta.isStoreHasGroup(storeId); + } + + public List getStoresByStoreGroup(int storeGroupId) throws PDException { + Set storeIds = storeInfoMeta.getStoreIdsByGroup(storeGroupId); + return getStores().stream().filter(store -> storeIds.contains(store.getId())).collect(Collectors.toList()); + } + + /** + * need check the store group id is exist && the store has no partition + * + * @param storeId store id + * @param storeGroupId group id + * @throws PDException + */ + public void updateStoreGroupRelation(long storeId, int storeGroupId) throws PDException { + var storeGroup = configService.getStoreGroup(storeGroupId); + if (storeGroup != null) { + storeInfoMeta.updateStoreGroup(storeId, storeGroupId); + } else { + throw new PDException(-1, "store group not found"); + } + } + + public int getShardGroupBelongsToStoreGroup(Metapb.ShardGroup group) throws PDException { + if (group == null || group.getShardsList().isEmpty()) { + return 0; + } + return getStoreGroupByStore(group.getShardsList().get(0).getStoreId()); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/StoreNodeService.java } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java index 9e933a6368..0df75a7047 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java /* * 
Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. */ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java package org.apache.hugegraph.pd; import java.util.ArrayList; @@ -35,13 +38,21 @@ import org.apache.hugegraph.pd.common.KVPair; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.config.PDConfig; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java import org.apache.hugegraph.pd.grpc.MetaTask; import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.grpc.Pdpb; +======== +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java import org.apache.hugegraph.pd.meta.TaskInfoMeta; import org.apache.hugegraph.pd.raft.RaftEngine; import lombok.extern.slf4j.Slf4j; +import org.apache.hugegraph.pd.common.Consts; /** * The task scheduling service checks the status of stores, resources, and partitions on a @@ -53,6 +64,7 @@ */ @Slf4j public class TaskScheduleService { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java private static final String BALANCE_SHARD_KEY = "BALANCE_SHARD_KEY"; // The dynamic balancing can only be carried out after the machine is offline for 30 minutes @@ -82,9 +94,49 @@ public class TaskScheduleService { }; private long lastStoreTurnoffTime = 0; private long lastBalanceLeaderTime = 0; +======== + private static final String KEY_ENABLE_AUTO_BALANCE = "key/ENABLE_AUTO_BALANCE"; + private final long TurnOffAndBalanceInterval = 30 * 60 * 1000; 
//机器下线30后才能进行动态平衡 + + private final long BalanceLeaderInterval = 30 * 1000; // leader平衡时间间隔 + private final PDConfig pdConfig; + private StoreNodeService storeService; + private PartitionService partitionService; + private ScheduledExecutorService executor; + private TaskInfoMeta taskInfoMeta; + private StoreMonitorDataService storeMonitorDataService; + private KvService kvService; + private LogService logService; + private ConfigService configService; + private long lastStoreTurnoffTime = 0; + private long lastBalanceLeaderTime = 0; + private final long clusterStartTime; + + /** + * 按照value的排序,相同的按照key排序 + * @param + * @param + */ + private static class KvPairComparator, V extends Comparable> + implements Comparator> { + private boolean ascend; + + public KvPairComparator(boolean ascend) { + this.ascend = ascend; + } + + @Override + public int compare(KVPair o1, KVPair o2) { + if (Objects.equals(o1.getValue(), o2.getValue())) { + return o1.getKey().compareTo(o2.getKey()) * (ascend ? 1 : -1); + } + return (o1.getValue().compareTo(o2.getValue())) * (ascend ? 
1 : -1); + } + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java public TaskScheduleService(PDConfig config, StoreNodeService storeService, - PartitionService partitionService) { + PartitionService partitionService, ConfigService configService) { this.pdConfig = config; this.storeService = storeService; this.partitionService = partitionService; @@ -94,38 +146,54 @@ public TaskScheduleService(PDConfig config, StoreNodeService storeService, this.clusterStartTime = System.currentTimeMillis(); this.kvService = new KvService(pdConfig); this.executor = new ScheduledThreadPoolExecutor(16); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java +======== + this.configService = configService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } + /** + * * 初始化方法,用于启动定时任务 + */ public void init() { executor.scheduleWithFixedDelay(() -> { try { + // if (! kvService.get(KEY_ENABLE_AUTO_BALANCE).isEmpty()) { patrolStores(); + // } } catch (Throwable e) { log.error("patrolStores exception: ", e); } }, 60, 60, TimeUnit.SECONDS); - executor.scheduleWithFixedDelay(() -> { - try { - patrolPartitions(); - balancePartitionLeader(false); - balancePartitionShard(); - } catch (Throwable e) { - log.error("patrolPartitions exception: ", e); - } - }, pdConfig.getPatrolInterval(), pdConfig.getPatrolInterval(), TimeUnit.SECONDS); +// executor.scheduleWithFixedDelay(() -> { +// try { +// if (! 
kvService.get(KEY_ENABLE_AUTO_BALANCE).isEmpty()) { +// patrolPartitions(); +// balancePartitionLeader(false); +// balancePartitionShard(); +// } +// } catch (Throwable e) { +// log.error("patrolPartitions exception: ", e); +// } +// }, pdConfig.getPatrolInterval(), pdConfig.getPatrolInterval(), TimeUnit.SECONDS); executor.scheduleWithFixedDelay(() -> { if (isLeader()) { kvService.clearTTLData(); } }, 1000, 1000, TimeUnit.MILLISECONDS); + executor.scheduleWithFixedDelay( () -> { - if (isLeader()) { - storeService.getQuotaChecker(); + try { + if (isLeader()) { + storeService.getQuota(); + } + } catch (Exception e) { + log.warn("get quota with error:", e); } - }, 2, 30, - TimeUnit.SECONDS); + }, 2, 30, TimeUnit.SECONDS); + // clean expired monitor data each 10 minutes, delay 3min. if (isLeader() && this.pdConfig.getStore().isMonitorDataEnabled()) { executor.scheduleAtFixedRate(() -> { @@ -148,6 +216,7 @@ public void init() { }, 180, 600, TimeUnit.SECONDS); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java storeService.addStatusListener(new StoreStatusListener() { @Override public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, @@ -180,16 +249,55 @@ public void onStoreRaftChanged(Metapb.Store store) { } }); +======== +// storeService.addStatusListener(new StoreStatusListener() { +// @Override +// public void onStoreStatusChanged(Metapb.Store store, Metapb.StoreState old, Metapb.StoreState status) { +// if ( status == Metapb.StoreState.Tombstone ) { +// lastStoreTurnoffTime = System.currentTimeMillis(); +// } + +// if ( status == Metapb.StoreState.Up) { +// executor.schedule(()->{ +// try { //store 上线后延时1分钟进行leader平衡 +// balancePartitionLeader(false); +// } catch (PDException e) { +// log.error("exception {}", e); +// } +// }, BalanceLeaderInterval, TimeUnit.MILLISECONDS); +// +// } +// } +// +// @Override +// public void onGraphChange(Metapb.Graph graph, +// Metapb.GraphState stateOld, +// 
Metapb.GraphState stateNew) { +// +// } +// +// @Override +// public void onStoreRaftChanged(Metapb.Store store) { +// +// } +// }); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } public void shutDown() { executor.shutdownNow(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java private boolean isLeader() { return RaftEngine.getInstance().isLeader(); } +======== + private boolean isLeader(){ + return RaftEngine.getInstance().isLeader(); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java /** * Inspect all stores to see if they are online and have enough storage space */ @@ -199,11 +307,18 @@ public List patrolStores() throws PDException { } List changedStores = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java // Check your store online status List stores = storeService.getStores(""); Map activeStores = storeService.getActiveStores("") .stream().collect( Collectors.toMap(Metapb.Store::getId, t -> t)); +======== + // 检查store在线状态 + List stores = storeService.getStores(); + Map activeStores = storeService.getActiveStores() + .stream().collect(Collectors.toMap(Metapb.Store::getId, t -> t)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java for (Metapb.Store store : stores) { Metapb.Store changeStore = null; if ((store.getState() == Metapb.StoreState.Up @@ -214,6 +329,7 @@ public List patrolStores() throws PDException { .setState(Metapb.StoreState.Offline) .build(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } else if ((store.getState() == Metapb.StoreState.Exiting && !activeStores.containsKey(store.getId())) || (store.getState() == Metapb.StoreState.Offline && @@ -230,7 +346,26 @@ public List 
patrolStores() throws PDException { LogService.TASK, changeStore); log.info("patrolStores store {} Offline", changeStore.getId()); } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } + // tomb store 会导致从新分区,暂时不处理 +// else if ((store.getState() == Metapb.StoreState.Exiting && !activeStores.containsKey(store.getId())) || +// (store.getState() == Metapb.StoreState.Offline && +// (System.currentTimeMillis() - store.getLastHeartbeat() > +// pdConfig.getStore().getMaxDownTime() * 1000) && +// (System.currentTimeMillis() - clusterStartTime > +// pdConfig.getStore().getMaxDownTime() * 1000))) { +// //手工修改为下线或者离线达到时长 +// // 修改状态为关机, 增加 checkStoreCanOffline 检测 +// if (storeService.checkStoreCanOffline(store)) { +// changeStore = Metapb.Store.newBuilder(store) +// .setState(Metapb.StoreState.Tombstone).build(); +// this.logService.insertLog(LogService.NODE_CHANGE, +// LogService.TASK, changeStore); +// log.info("patrolStores store {} Offline", changeStore.getId()); +// } +// } if (changeStore != null) { storeService.updateStore(changeStore); changedStores.add(changeStore); @@ -240,8 +375,12 @@ public List patrolStores() throws PDException { } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java * Inspect all partitions to check whether the number of replicas is correct and the number * of replicas in the shard group +======== + * 巡查所有的分区,检查副本数是否正确, shard group的副本数 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java */ public List patrolPartitions() throws PDException { if (!isLeader()) { @@ -252,7 +391,10 @@ public List patrolPartitions() throws PDException { for (Metapb.ShardGroup group : storeService.getShardGroups()) { if (group.getShardsCount() != pdConfig.getPartition().getShardCount()) { storeService.reallocShards(group); +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } } // Check if the shard is online. @@ -271,34 +413,52 @@ public List patrolPartitions() throws PDException { storeService.storeTurnoff(pair.getValue()); partitionService.shardOffline(partition, pair.getValue().getId()); } - } return null; } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java * Balance the number of partitions between stores * It takes half an hour for the machine to turn to UP before it can be dynamically balanced */ public synchronized Map> balancePartitionShard() throws PDException { log.info("balancePartitions starting, isleader:{}", isLeader()); +======== + * 在Store之间平衡分区的数量 + * 机器转为UP半小时后才能进行动态平衡 + * + */ + @Deprecated + public synchronized Map> balancePartitionShard() throws PDException { + return balancePartitionShard(Consts.DEFAULT_STORE_GROUP_ID); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java - if (!isLeader()) { + public synchronized Map> balancePartitionShard(int storeGroupId) throws PDException { + log.info("balancePartitionShard starting, is leader:{}", isLeader()); + if (!isLeader() || System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { return null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (System.currentTimeMillis() - lastStoreTurnoffTime < TurnOffAndBalanceInterval) { return null; } int activeStores = storeService.getActiveStores().size(); if (activeStores == 0) { +======== + var activeStores = storeService.getActiveStoresByStoreGroup(storeGroupId); + if (activeStores.isEmpty()) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java log.warn("balancePartitionShard non active stores, skip to balancePartitionShard"); return null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { return null; } @@ -314,9 +474,25 @@ public synchronized Map> balancePartitionShard() thr partitionMap.put(store.getId(), new HashMap<>()); }); +======== + int totalShards = configService.getPartitionCount(storeGroupId) * pdConfig.getPartition().getShardCount(); + int averageCount = totalShards / activeStores.size(); + int remainder = totalShards % activeStores.size(); + + // 统计每个store上分区, StoreId -> PartitionID, ShardRole + Map> partitionMap = activeStores.stream() + .collect(Collectors.toMap(Metapb.Store::getId, s-> new HashMap<>())); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java AtomicReference isLeaner = new AtomicReference<>(false); - partitionService.getPartitions().forEach(partition -> { + for (var shardGroup : storeService.getShardGroups(storeGroupId)) { + for (var shard : shardGroup.getShardsList()) { + if (shard.getRole() == Metapb.ShardRole.Learner) { + isLeaner.set(true); + break; + } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java try { storeService.getShardList(partition.getId()).forEach(shard -> { Long storeId = shard.getStoreId(); @@ -331,14 +507,19 @@ public synchronized Map> balancePartitionShard() thr } catch (PDException e) { log.error("get partition {} shard list error:{}.", partition.getId(), e.getMessage()); +======== + long storeId = shard.getStoreId(); + partitionMap.get(storeId).put(shardGroup.getId(), shard.getRole()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } - }); + } if (isLeaner.get()) { 
log.warn("balancePartitionShard is doing, skip this balancePartitionShard task"); return null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java // According to shard sort the quantity from highest to lowest List> sortedList = new ArrayList<>(); partitionMap.forEach((storeId, shards) -> { @@ -346,6 +527,15 @@ public synchronized Map> balancePartitionShard() thr }); sortedList.sort(((o1, o2) -> o2.getValue().compareTo(o1.getValue()))); // The largest heap, moved in store -> shard count +======== + // 按照shard数量由高到低排序store + List> sortedList = partitionMap.entrySet().stream() + .map(entry -> new KVPair<>(entry.getKey(), entry.getValue().size())) + .sorted((o1, o2) -> o2.getValue().compareTo(o1.getValue())) + .collect(Collectors.toList()); + + // 最大堆, 被移入的store -> shard count +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java PriorityQueue> maxHeap = new PriorityQueue<>(sortedList.size(), (o1, o2) -> o2.getValue() .compareTo( @@ -353,11 +543,19 @@ public synchronized Map> balancePartitionShard() thr // of individual copies committedIndex Map> committedIndexMap = partitionService.getCommittedIndexStats(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java // Partition ID -->source StoreID, target StoreID Map> movedPartitions = new HashMap<>(); // Remove redundant shards, traverse the stores in the order of shards from most to // least, and the remainder is allocated to the store with more shards first, reducing // the probability of migration +======== + + // 分区ID --> 源StoreID,目标StoreID + Map> movedPartitions = new HashMap<>(); + + // 移除多余的shard, 按照shards由多到少的顺序遍历store,余数remainder优先给shards多的store分配,减少迁移的概率 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java for (int index = 0; index < sortedList.size(); index++) { long storeId = 
sortedList.get(index).getKey(); if (!partitionMap.containsKey(storeId)) { @@ -374,11 +572,14 @@ public synchronized Map> balancePartitionShard() thr "balancePartitionShard storeId {}, shardsSize {}, targetCount {}, " + "moveCount {}", storeId, shards.size(), targetCount, movedCount); - for (Iterator iterator = shards.keySet().iterator(); - movedCount > 0 && iterator.hasNext(); ) { + for (Iterator iterator = shards.keySet().iterator(); movedCount > 0 && iterator.hasNext(); ) { Integer id = iterator.next(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (!movedPartitions.containsKey(id)) { +======== + if ( !movedPartitions.containsKey(id)) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java log.info("store {}, shard of partition {} can be moved", storeId, id); movedPartitions.put(id, new KVPair<>(storeId, 0L)); movedCount--; @@ -394,6 +595,7 @@ public synchronized Map> balancePartitionShard() thr } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (movedPartitions.size() == 0) { log.warn( "movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} " + @@ -405,6 +607,17 @@ public synchronized Map> balancePartitionShard() thr while (moveIterator.hasNext()) { if (maxHeap.size() == 0) { +======== + if (movedPartitions.isEmpty()){ + log.warn("movedPartitions is empty, totalShards:{} averageCount:{} remainder:{} sortedList:{}", + totalShards, averageCount, remainder, sortedList); + } + + Iterator>> moveIterator = movedPartitions.entrySet().iterator(); + + while (moveIterator.hasNext()) { + if(maxHeap.isEmpty()) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java break; } Map.Entry> moveEntry = moveIterator.next(); @@ -412,15 +625,20 @@ public synchronized Map> balancePartitionShard() thr long sourceStoreId = 
moveEntry.getValue().getKey(); List> tmpList = new ArrayList<>(maxHeap.size()); - while (maxHeap.size() > 0) { + while (!maxHeap.isEmpty()) { KVPair pair = maxHeap.poll(); long destStoreId = pair.getKey(); boolean destContains = false; if (partitionMap.containsKey(destStoreId)) { destContains = partitionMap.get(destStoreId).containsKey(partitionId); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java // If the destination store already contains the partition, take the store if (!destContains) { +======== + // 如果目的store已经包含了该partition,则取一下store + if(!destContains) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java moveEntry.getValue().setValue(pair.getKey()); log.info( "balancePartitionShard will move partition {} from store {} to store " + @@ -439,9 +657,13 @@ public synchronized Map> balancePartitionShard() thr maxHeap.addAll(tmpList); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java kvService.put(BALANCE_SHARD_KEY, "DOING", 180 * 1000); // Start the migration +======== + // 开始迁移 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java movedPartitions.forEach((partId, storePair) -> { // Neither the source nor destination storeID is 0 if (storePair.getKey() > 0 && storePair.getValue() > 0) { @@ -470,9 +692,8 @@ public synchronized Map balancePartitionLeader(boolean immediatel System.currentTimeMillis() - lastBalanceLeaderTime < BalanceLeaderInterval) { return results; } - lastBalanceLeaderTime = System.currentTimeMillis(); - List shardGroups = storeService.getShardGroups(); + lastBalanceLeaderTime = System.currentTimeMillis(); // When a task is split or scaled-in, it is exited var taskMeta = storeService.getTaskInfoMeta(); @@ -480,16 +701,58 @@ public synchronized Map balancePartitionLeader(boolean immediatel throw new PDException(1001, 
"split or combine task is processing, please try later!"); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (Objects.equals(kvService.get(BALANCE_SHARD_KEY), "DOING")) { throw new PDException(1001, "balance shard is processing, please try later!"); +======== + for (var storeGroup : configService.getAllStoreGroup()) { + results.putAll(balanceShardLeaderByStoreGroup(storeGroup.getGroupId())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } - if (shardGroups.size() == 0) { - return results; + return results; + } + + private Map balanceShardLeaderByStoreGroup(int storeGroupId) throws PDException { + Map results = new HashMap<>(); + + List shardGroups = storeService.getShardGroups(storeGroupId); + + // store id -> shard group count + Map storeShardCount = shardGroups.stream() + .flatMap(shardGroup -> shardGroup.getShardsList().stream()) + .map(Metapb.Shard::getStoreId) + .collect(Collectors.groupingBy(o -> o, Collectors.counting())); + + log.info("balancePartitionLeader, storeGroup: {}, shard group size: {}, by store: {}", storeGroupId, + shardGroups.size(), storeShardCount); + // total + int shardCountPerPartition = pdConfig.getPartition().getShardCount(); + + // part 1 : shard count % shard count per partition + var targetCountMap = storeShardCount.entrySet().stream() + .map(e -> new KVPair<>(e.getKey(), e.getValue() / shardCountPerPartition)) + .collect(Collectors.toMap(KVPair::getKey, KVPair::getValue)); + + var allocCount = targetCountMap.values().stream().mapToInt(Long::intValue).sum(); + int shardGroupCount = shardGroups.size(); + + if (allocCount != shardGroupCount) { + // part 2 : reminder count + var reminderList = storeShardCount.entrySet().stream() + .map(e -> new KVPair<>(e.getKey(), e.getValue() % shardCountPerPartition)) + .filter(e -> e.getValue() > 0) + .sorted(new KvPairComparator<>(false)) + .collect(Collectors.toList()); + for 
(int i = 0; i < shardGroupCount - allocCount; i++) { + var pair = reminderList.get(i); + targetCountMap.put(pair.getKey(), targetCountMap.getOrDefault(pair.getKey(), 0L) + 1); + } } - Map storeShardCount = new HashMap<>(); + PriorityQueue> targetCount = new PriorityQueue<>(new KvPairComparator<>(true)); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java shardGroups.forEach(group -> { group.getShardsList().forEach(shard -> { storeShardCount.put(shard.getStoreId(), @@ -519,15 +782,27 @@ public synchronized Map balancePartitionLeader(boolean immediatel } targetCount.add(new KVPair<>(sortedGroups.get(sortedGroups.size() - 1).getKey(), shardGroups.size() - sum)); +======== + targetCount.addAll(targetCountMap.entrySet().stream() + .map(e -> new KVPair<>(e.getKey(), e.getValue())) + .collect(Collectors.toList())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java log.info("target count: {}", targetCount); for (var group : shardGroups) { var map = group.getShardsList().stream() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); var tmpList = new ArrayList>(); // If there are many stores, they may not contain the corresponding store ID. Save // the non-compliant stores to the temporary list until you find a suitable store while (!targetCount.isEmpty()) { +======== + .collect(Collectors.toMap(Metapb.Shard::getStoreId, shard -> shard)); + var tmpList = new ArrayList>(); + // store比较多的情况,可能不包含对应的store id. 
则先将不符合的store保存到临时列表,直到找到一个合适的store + while (!targetCount.isEmpty()){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java var pair = targetCount.poll(); var storeId = pair.getKey(); if (map.containsKey(storeId)) { @@ -552,12 +827,14 @@ public synchronized Map balancePartitionLeader(boolean immediatel tmpList.add(pair); } } + // 设置完成后,如果没达到target count,还要放回去 targetCount.addAll(tmpList); } return results; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java private long getMaxIndexGap(Map> committedIndexMap, int partitionId) { long maxGap = Long.MAX_VALUE; if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { @@ -575,6 +852,26 @@ private long getMaxIndexGap(Map> committedIndexMap, int maxGap = sortedList.get(0) - sortedList.get(sortedList.size() - 1); return maxGap; } +======== +// private long getMaxIndexGap(Map> committedIndexMap, int partitionId) { +// long maxGap = Long.MAX_VALUE; +// if (committedIndexMap == null || !committedIndexMap.containsKey(partitionId)) { +// return maxGap; +// } +// Map shardMap = committedIndexMap.get(partitionId); +// if(shardMap == null || shardMap.size() == 0) { +// return maxGap; +// } +// List sortedList = new ArrayList<>(); +// shardMap.forEach((storeId, committedIndex) -> { +// sortedList.add(committedIndex); +// }); +// // 由大到小排序的list +// sortedList.sort(Comparator.reverseOrder()); +// maxGap = sortedList.get(0) - sortedList.get(sortedList.size() - 1); +// return maxGap; +// } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java /** * Perform partition splitting, which is divided into automatic splitting and manual splitting @@ -582,11 +879,11 @@ private long getMaxIndexGap(Map> committedIndexMap, int * @return * @throws PDException */ - public List splitPartition( - Pdpb.OperationMode mode, List params) throws PDException { + public 
List splitPartition( ClusterOp.OperationMode mode, int storeGroupId, + List params) throws PDException { - if (mode == Pdpb.OperationMode.Auto) { - return autoSplitPartition(); + if (mode == ClusterOp.OperationMode.Auto) { + return autoSplitPartition(storeGroupId); } var list = params.stream() @@ -606,11 +903,16 @@ public List splitPartition( * * @throws PDException */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java public List autoSplitPartition() throws PDException { +======== + public List autoSplitPartition(int storeGroupId) throws PDException { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (!isLeader()) { return null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats().getState()) { if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats().getState()) { throw new PDException(Pdpb.ErrorType.Split_Partition_Doing_VALUE, @@ -632,11 +934,34 @@ public List autoSplitPartition() throws PDException { "Too many partitions per store, partition.store-max-shard-count" + " = " + pdConfig.getPartition().getMaxShardsPerStore()); +======== + if (Metapb.ClusterState.Cluster_OK != storeService.getClusterStats(storeGroupId).getState()) { + if (Metapb.ClusterState.Cluster_Offline == storeService.getClusterStats(storeGroupId).getState()) { + throw new PDException(ErrorType.Split_Partition_Doing_VALUE, "The data is splitting"); + } + + else { + throw new PDException(ErrorType.Cluster_State_Forbid_Splitting_VALUE, + "The current state of the cluster prohibits splitting data"); + } + } + + // 计算集群能能支持的最大split count + int splitCount = pdConfig.getPartition().getMaxShardsPerStore() * + storeService.getActiveStoresByStoreGroup(storeGroupId).size() / + (configService.getPartitionCount(storeGroupId) * 
pdConfig.getPartition().getShardCount()); + + if (splitCount < 2) { + throw new PDException(ErrorType.Too_Many_Partitions_Per_Store_VALUE, + "Too many partitions per store, partition.store-max-shard-count = " + + pdConfig.getPartition().getMaxShardsPerStore()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java } // If the maximum number of partitions per store is not reached, it will be split log.info("Start to split partitions..., split count = {}", splitCount); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java // Set the cluster status to Offline storeService.updateClusterStatus(Metapb.ClusterState.Cluster_Offline); // Modify the default number of partitions @@ -646,6 +971,16 @@ public List autoSplitPartition() throws PDException { var list = storeService.getShardGroups().stream() .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) .collect(Collectors.toList()); +======== + // 设置集群状态为下线 + storeService.updateClusterStatus(storeGroupId, Metapb.ClusterState.Cluster_Offline); + // 修改默认分区数量 + // pdConfig.getConfigService().setPartitionCount(storeService.getShardGroups().size() * splitCount); + + var list = storeService.getShardGroups(storeGroupId).stream() + .map(shardGroup -> new KVPair<>(shardGroup.getId(), splitCount)) + .collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/TaskScheduleService.java storeService.splitShardGroups(list); return null; @@ -670,6 +1005,12 @@ public void reportTask(MetaTask.Task task) { case Clean_Partition: partitionService.handleCleanPartitionTask(task); break; + case Build_Index: + partitionService.handleBuildIndexTask(task); + break; + case Backup_Graph: + partitionService.handleBackupGraphTask(task); + break; default: break; } @@ -692,7 +1033,6 @@ public Boolean dbCompaction(String tableName) throws PDException { 
storeService.shardGroupsDbCompaction(shardGroup.getId(), tableName); } - // return true; } @@ -819,7 +1159,10 @@ public Map canAllPartitionsMovedOut(Metapb.Store sourceStore) th remainPartitions.add(partId); } }); - if (remainPartitions.size() > 0) { + + boolean isExecutingTasks = storeService.getStore(sourceStore.getId()).getStats().getExecutingTask(); + + if (!remainPartitions.isEmpty() || isExecutingTasks) { resultMap.put("flag", false); resultMap.put("movedPartitions", null); } else { diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java index 0478b33da6..b04c2feb20 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,13 +17,33 @@ */ package org.apache.hugegraph.pd.config; +======== +package org.apache.hugegraph.pd.config; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; +import org.apache.commons.collections4.CollectionUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; +import org.springframework.stereotype.Component; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; + +import lombok.Data; +import lombok.Getter; + import org.apache.hugegraph.pd.ConfigService; import org.apache.hugegraph.pd.IdService; import org.springframework.beans.factory.annotation.Autowired; @@ -39,11 +60,42 @@ @Component public class PDConfig { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java // cluster ID +======== + private static String[] storeInfo = {"store", + "$2a$04$9ZGBULe2vc73DMj7r/iBKeQB1SagtUXPrDbMmNswRkTwlWQURE/Jy", + "E3UnnQa605go"}; + private static String[] serverInfo = {"hg", + "$2a$04$i10KooNg6wLvIPVDh909n.RBYlZ/4pJo978nFK86nrqQiGIKV4UGS", + "qRyYhxVAWDb5"}; + private static String[] hubbleInfo = {"hubble", + "$2a$04$pSGkohaywGgFrJLr6VOPm.IK2WtOjlNLcZN8gct5uIKEDO1I61DGa", + "iMjHnUl5Pprx"}; + private static String[] vermeer = {"vermeer", + "$2a$04$N89qHe0v5jqNJKhQZHnTdOFSGmiNoiA2B2fdWpV2BwrtJK72dXYD.", + "FqU8BOvTpteT"}; + private static String[][] infos = new 
String[][]{storeInfo, serverInfo, hubbleInfo, vermeer}; + + @Getter + private static List defaultServers; + + static { + defaultServers = new ArrayList<>(infos.length); + for (String[] info : infos) { + defaultServers.add(new Server(info[0], info[1], info[2])); + } + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @Value("${pd.cluster_id:1}") private long clusterId; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java // The patrol task interval +======== + // 巡查任务时间间隔 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @Value("${pd.patrol-interval:300}") private long patrolInterval = 300; @Value("${pd.data-path}") @@ -51,7 +103,11 @@ public class PDConfig { @Value("${pd.initial-store-count:3}") private int minStoreCount; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java // The initial store list, within which the store is automatically activated +======== + // 初始store列表,该列表内的store自动激活 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java // format: store_addresss, store_address, store_address/group_id, store_address/group_id @Value("${pd.initial-store-list: ''}") private String initialStoreList; @@ -63,7 +119,29 @@ public class PDConfig { @Value("${license.license-path}") private String licensePath; @Autowired +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java + private ThreadPoolGrpc threadPoolGrpc; +======== + private JobConfig jobConfig; + + @Autowired private ThreadPoolGrpc threadPoolGrpc; + + @Data + @Configuration + public class ThreadPoolGrpc { + @Value("${thread.pool.grpc.core:600}") + private int core; + @Value("${thread.pool.grpc.max:1000}") + private int max; + @Value("${thread.pool.grpc.queue:" + Integer.MAX_VALUE + "}") + private int 
queue; + } + + @Value("${auth.secret-key: 'FXQXbJtbCLxODc6tGci732pkH1cyf8Qg'}") + private String secretKey; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java @Autowired private Raft raft; @Autowired @@ -72,20 +150,46 @@ public class PDConfig { private Partition partition; @Autowired private Discovery discovery; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java private Map initialStoreMap = null; private ConfigService configService; private IdService idService; +======== + + private volatile Map initialStoreMap = null; + private volatile Map initialStoreGroupMap = null; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java public Map getInitialStoreMap() { if (initialStoreMap == null) { - initialStoreMap = new HashMap<>(); + initialStoreMap = new ConcurrentHashMap<>(); Arrays.asList(initialStoreList.split(",")).forEach(s -> { - initialStoreMap.put(s, s); + String[] arr = s.split("/"); + initialStoreMap.put(arr[0], arr[0]); }); } return initialStoreMap; } + public int getInitialStoreGroup(String address) { + if (initialStoreGroupMap == null) { + synchronized (this) { + if (initialStoreGroupMap == null) { + initialStoreGroupMap = new ConcurrentHashMap<>(); + Arrays.asList(initialStoreList.split(",")).forEach(s -> { + String[] arr = s.split("/"); + if (arr.length == 2) { + initialStoreGroupMap.put(arr[0], Integer.parseInt(arr[1])); + } else { + initialStoreGroupMap.put(arr[0], DEFAULT_STORE_GROUP_ID); + } + }); + } + } + } + return initialStoreGroupMap.getOrDefault(address, DEFAULT_STORE_GROUP_ID); + } + /** * The initial number of partitions * Number of Stores * Maximum number of replicas per Store / Number of replicas per partition @@ -166,8 +270,8 @@ public class Store { @Value("${store.max-down-time:1800}") private long maxDownTime = 1800; - @Value("${store.monitor_data_enabled:true}") - private 
boolean monitorDataEnabled = true; + @Value("${store.monitor_data_enabled:false}") + private boolean monitorDataEnabled = false; @Value("${store.monitor_data_interval: 1 minute}") private String monitorDataInterval = "1 minute"; @@ -248,9 +352,14 @@ private Long parseTimeExpression(String exp) { @Data @Configuration +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java public class Partition { private int totalCount = 0; +======== + public class Partition{ +// private int totalCount = 0; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java // Maximum number of replicas per Store @Value("${partition.store-max-shard-count:24}") @@ -259,6 +368,7 @@ public class Partition { @Value("${partition.default-shard-count:3}") private int shardCount = 3; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java public int getTotalCount() { if (totalCount == 0) { totalCount = getInitialPartitionCount(); @@ -269,6 +379,18 @@ public int getTotalCount() { public void setTotalCount(int totalCount) { this.totalCount = totalCount; } +======== +// public void setTotalCount(int totalCount){ +// this.totalCount = totalCount; +// } +// +// public int getTotalCount() { +// if ( totalCount == 0 ) { +// totalCount = getInitialPartitionCount(); +// } +// return totalCount; +// } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java } @Data @@ -281,4 +403,58 @@ public class Discovery { private int heartbeatOutTimes = 3; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java +======== + @Data + @Configuration + public class JobConfig { + @Value("${job.interruptableThreadPool.core:0}") + private int core; + @Value("${job.interruptableThreadPool.max:256}") + private int max; + @Value("${job.interruptableThreadPool.queue:" + Integer.MAX_VALUE + "}") + private 
int queueSize; + @Value("${job.start-time:19}") + private int startTime; + @Value("${job.uninterruptibleThreadPool.core:0}") + private int uninterruptibleCore; + @Value("${job.uninterruptibleThreadPool.max:256}") + private int uninterruptibleMax; + @Value("${job.uninterruptibleThreadPool.queue:" + Integer.MAX_VALUE + "}") + private int uninterruptibleQueueSize; + } + + + @Data + @Configuration + @ConfigurationProperties(prefix = "pd") + public class Servers { + List servers; + + public List getServers() { + if (CollectionUtils.isEmpty(servers)) { + return defaultServers; + } + return servers; + } + } + + @Value("${pd.allows-address-acquisition: false}") + private boolean allowsAddressAcquisition = false; + + @Getter + private ConfigService configService; + + @Getter + private IdService idService; + + public void setConfigService(ConfigService configService) { + this.configService = configService; + } + + public void setIdService(IdService idService) { + this.idService = idService; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/config/PDConfig.java } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java index edcec9fc85..cfa80678ed 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,13 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java import java.util.List; import java.util.Optional; @@ -24,8 +32,11 @@ import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.grpc.Metapb; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java public class ConfigMetaStore extends MetadataRocksDBStore { +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java private final long clusterId; public ConfigMetaStore(PDConfig pdConfig) { @@ -68,4 +79,22 @@ public Metapb.PDConfig getPdConfig(long version) throws PDException { return max.isPresent() ? 
max.get() : null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java +======== + public Metapb.StoreGroup saveStoreGroup(Metapb.StoreGroup storeGroup) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getStoreGroupKey(storeGroup.getGroupId()); + put(graphSpaceKey, storeGroup.toByteArray()); + return storeGroup; + } + + public Metapb.StoreGroup getStoreGroup(int groupId) throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getStoreGroupKey(groupId); + return getOne(Metapb.StoreGroup.parser(), graphSpaceKey); + } + + public List getStoreGroups() throws PDException { + byte[] graphSpaceKey = MetadataKeyHelper.getStoreGroupPrefix(); + return scanPrefix(Metapb.StoreGroup.parser(), graphSpaceKey); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/ConfigMetaStore.java } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java index e774539c1e..7a0b6f8585 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,17 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/DiscoveryMetaStore.java import java.util.LinkedList; import java.util.List; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java index 661670b8a8..fc467b1add 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,16 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.store.KV; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/IdMetaStore.java import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java index 193b3b7229..42060a622e 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,12 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.commons.lang3.StringUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java import java.nio.charset.Charset; @@ -40,9 +47,15 @@ public class MetadataKeyHelper { private static final String PD_CONFIG = "PD_CONFIG"; private static final String TASK_SPLIT = "TASK_SPLIT"; private static final String TASK_MOVE = "TASK_MOVE"; + private static final String TASK_USER = "TASK_USER"; private static final String LOG_RECORD = "LOG_RECORD"; private static final String QUEUE = "QUEUE"; + private static final String OBSERVER_NOTICE = "OB_N"; + private static final String NOTICE_CONTENT = "NOTICE_C"; + private static final String STORE_GROUP = "STORE_GROUP"; + + private static final String STORE_GROUP_RELATION = "STORE_GROUP_RELATION"; public static byte[] getStoreInfoKey(final long storeId) { //STORE/{storeId} @@ -175,11 +188,19 @@ public static byte[] getGraphPrefix() { public static byte[] getPartitionStatusKey(String graphName, int id) { //PARTITION_STATUS/{ String key = StringBuilderHelper.get() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java .append(PARTITION_STATUS) .append(DELIMITER) .append(graphName).append(DELIMITER) .append(id).append(DELIMITER) .toString(); +======== + .append(PARTITION_STATUS) + .append(DELIMITER) + // .append(graphName).append(DELIMITER) + .append(id).append(DELIMITER) + .toString(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java return key.getBytes(Charset.defaultCharset()); } @@ -214,7 +235,7 @@ public static byte[] getPdConfigKey(String configKey) { } public static byte[] getQueueItemPrefix() { - //QUEUE + // QUEUE String key = 
StringBuilderHelper.get() .append(QUEUE).append(DELIMITER) .toString(); @@ -222,7 +243,7 @@ public static byte[] getQueueItemPrefix() { } public static byte[] getQueueItemKey(String itemId) { - //QUEUE + // QUEUE StringBuilder builder = StringBuilderHelper.get() .append(QUEUE).append(DELIMITER); if (!StringUtils.isEmpty(itemId)) { @@ -231,6 +252,7 @@ public static byte[] getQueueItemKey(String itemId) { return builder.toString().getBytes(Charset.defaultCharset()); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java public static byte[] getSplitTaskKey(String graphName, int groupId) { // TASK_SPLIT/{GraphName}/{partitionID} StringBuilder builder = StringBuilderHelper.get() @@ -240,7 +262,52 @@ public static byte[] getSplitTaskKey(String graphName, int groupId) { return builder.toString().getBytes(Charset.defaultCharset()); } + public static byte[] getSplitTaskPrefix(String graphName) { +======== + public static byte[] getNoticeContentKey(long noticeId) { + // NOTICE_C/{noticeId} + StringBuilder builder = StringBuilderHelper.get() + .append(NOTICE_CONTENT).append(DELIMITER).append(noticeId); + + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getObserverNoticePrefix() { + // OB_N + String key = StringBuilderHelper.get() + .append(OBSERVER_NOTICE).append(DELIMITER) + .toString(); + + return key.getBytes(Charset.defaultCharset()); + } + + public static byte[] getObserverNoticeKey(long observerId, long noticeId) { + // OB_N/{observerId}/{noticeId} + StringBuilder builder = StringBuilderHelper.get() + .append(OBSERVER_NOTICE).append(DELIMITER).append(observerId) + .append(DELIMITER).append(noticeId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getSplitTaskKey(String graphName, int groupId) { + // TASK_SPLIT/{GraphName}/{partitionID} + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER) + 
.append(graphName).append(DELIMITER) + .append(groupId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + public static byte[] getSplitTaskPrefix(String graphName) { + // TASK_SPLIT/{GraphName}/ + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_SPLIT).append(DELIMITER) + .append(graphName); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getAllSplitTaskPrefix() { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java // TASK_SPLIT/{GraphName}/ StringBuilder builder = StringBuilderHelper.get() .append(TASK_SPLIT).append(DELIMITER) @@ -248,6 +315,7 @@ public static byte[] getSplitTaskPrefix(String graphName) { return builder.toString().getBytes(Charset.defaultCharset()); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java public static byte[] getAllSplitTaskPrefix() { // TASK_SPLIT/{GraphName}/ StringBuilder builder = StringBuilderHelper.get() @@ -255,6 +323,8 @@ public static byte[] getAllSplitTaskPrefix() { return builder.toString().getBytes(Charset.defaultCharset()); } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java public static byte[] getMoveTaskKey(String graphName, int targetGroupId, int groupId) { // TASK_MOVE/{GraphName}/to PartitionID/{source partitionID} StringBuilder builder = StringBuilderHelper.get() @@ -273,6 +343,26 @@ public static byte[] getMoveTaskPrefix(String graphName) { return builder.toString().getBytes(Charset.defaultCharset()); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java +======== + public static byte[] getUserTaskKey(long taskId, int partitionId) { + // TASK_BI/ task id / partition id + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_USER).append(DELIMITER) + 
.append(taskId).append(DELIMITER) + .append(partitionId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getUserTaskPrefix(long taskId) { + // TASK_MOVE/{GraphName}/to PartitionID/{source partitionID} + StringBuilder builder = StringBuilderHelper.get() + .append(TASK_USER).append(DELIMITER) + .append(taskId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java public static byte[] getAllMoveTaskPrefix() { // TASK_MOVE/{graphName}/toPartitionId/ StringBuilder builder = StringBuilderHelper.get() @@ -281,6 +371,7 @@ public static byte[] getAllMoveTaskPrefix() { } public static byte[] getLogKey(Metapb.LogRecord record) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java //LOG_RECORD/{action}/{time}/ StringBuilder builder = StringBuilderHelper.get() .append(LOG_RECORD) @@ -304,8 +395,33 @@ public static byte[] getLogKeyPrefix(String action, long time) { public static byte[] getKVPrefix(String prefix, String key) { //K@/{key} +======== + // LOG_RECORD/{action}/{time}/ + StringBuilder builder = StringBuilderHelper.get() + .append(LOG_RECORD) + .append(DELIMITER) + .append(record.getAction()) + .append(DELIMITER) + .append(record.getTimestamp()); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getLogKeyPrefix(String action, long time) { + // LOG_DATA_SPLIT/{time}/{GraphName} +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java + StringBuilder builder = StringBuilderHelper.get() + .append(LOG_RECORD) + .append(DELIMITER) + .append(action) + .append(DELIMITER) + .append(time); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getKVPrefix(String prefix, String key) { + // K@/{key} StringBuilder 
builder = StringBuilderHelper.get() - .append(prefix).append(DELIMITER); + .append(prefix).append(DELIMITER); if (!StringUtils.isEmpty(key)) { builder.append(key).append(DELIMITER); } @@ -314,7 +430,7 @@ public static byte[] getKVPrefix(String prefix, String key) { public static byte[] getKVTTLPrefix(String ttlPrefix, String prefix, String key) { StringBuilder builder = StringBuilderHelper.get().append(ttlPrefix) - .append(prefix).append(DELIMITER); + .append(prefix).append(DELIMITER); if (!StringUtils.isEmpty(key)) { builder.append(key).append(DELIMITER); } @@ -336,6 +452,33 @@ public static String getKVWatchKeyPrefix(String key, String watchDelimiter) { return builder.toString(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java +======== + public static byte[] getStoreGroupRelationKey(Long storeId) { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(STORE_GROUP_RELATION).append(DELIMITER).append(storeId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getStoreGroupRelationPrefix() { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(STORE_GROUP_RELATION).append(DELIMITER); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getStoreGroupKey(int groupId) { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(STORE_GROUP).append(DELIMITER).append(groupId); + return builder.toString().getBytes(Charset.defaultCharset()); + } + + public static byte[] getStoreGroupPrefix() { + StringBuilder builder = StringBuilderHelper.get(); + builder.append(STORE_GROUP).append(DELIMITER); + return builder.toString().getBytes(Charset.defaultCharset()); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataKeyHelper.java public static char getDelimiter() { return DELIMITER; } diff --git 
a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java index 7a12a0afa0..38502e718c 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -16,6 +17,18 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.store.HgKVStore; +import org.apache.hugegraph.pd.store.KV; +import com.google.protobuf.Parser; +import org.apache.commons.lang3.ArrayUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java import java.util.LinkedList; import java.util.List; @@ -41,7 +54,11 @@ public MetadataRocksDBStore(PDConfig pdConfig) { this.pdConfig = pdConfig; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java public HgKVStore getStore() { +======== + public HgKVStore getStore(){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java if (store == null) { store = MetadataFactory.getStore(pdConfig); } @@ -53,8 +70,13 @@ public byte[] getOne(byte[] key) throws PDException { try { byte[] bytes = store.get(key); return bytes; +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + }catch (Exception e){ + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java } } @@ -66,8 +88,13 @@ public E getOne(Parser parser, byte[] key) throws PDException { return null; } return parser.parseFrom(bytes); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + }catch (Exception e){ + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java } } @@ -75,8 +102,13 @@ public E getOne(Parser parser, byte[] key) throws PDException { public void put(byte[] key, byte[] value) throws PDException { try { getStore().put(key, value); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); +======== + } catch (Exception e){ + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataRocksDBStore.java } } @@ -111,7 +143,7 @@ public List scanPrefix(byte[] prefix) throws PDException { try { return this.store.scanPrefix(prefix); } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } } @@ -129,7 +161,7 @@ public List scanRange(Parser parser, byte[] start, byte[] end) throws stores.add(parser.parseFrom(keyValue.getValue())); } } 
catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } return stores; } @@ -143,7 +175,7 @@ public List scanPrefix(Parser parser, byte[] prefix) throws PDExceptio stores.add(parser.parseFrom(keyValue.getValue())); } } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } return stores; } @@ -158,7 +190,7 @@ public long remove(byte[] key) throws PDException { try { return this.store.remove(key); } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); } } @@ -167,7 +199,7 @@ public long removeByPrefix(byte[] prefix) throws PDException { try { return this.store.removeByPrefix(prefix); } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); } } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java index ae7fd2079c..7f4f61cdf5 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -14,9 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +======== +package org.apache.hugegraph.pd.meta; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java package org.apache.hugegraph.pd.meta; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java +======== +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.store.KV; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java import java.io.IOException; import java.util.LinkedList; import java.util.List; @@ -100,7 +111,11 @@ public T getInstanceWithTTL(Parser parser, byte[] key) throws PDException byte[] withTTL = this.getWithTTL(key); return parser.parseFrom(withTTL); } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE,e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java } } @@ -114,7 +129,11 @@ public List getInstanceListWithTTL(Parser parser, byte[] key) } return ts; } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); +======== + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE,e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/MetadataStoreBase.java } } } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java index 599d5f5c9e..b83e48207c 100644 --- 
a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,10 +16,24 @@ * limitations under the License. */ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java package org.apache.hugegraph.pd.meta; import java.util.ArrayList; import java.util.List; +import java.util.Map; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.commons.collections4.CollectionUtils; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.common.PartitionCache; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; + +import lombok.extern.slf4j.Slf4j; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.common.PartitionCache; @@ -32,6 +47,7 @@ */ @Slf4j public class PartitionMeta extends MetadataRocksDBStore { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java static String CID_GRAPH_ID_KEY = "GraphID"; static int CID_GRAPH_ID_MAX = 0xFFFE; @@ -40,6 +56,18 @@ public class PartitionMeta extends MetadataRocksDBStore { public PartitionMeta(PDConfig pdConfig) { super(pdConfig); //this.timeout = pdConfig.getEtcd().getTimeout(); +======== + + public static final String CID_GRAPH_ID_KEY = "GraphID"; + public static final int CID_GRAPH_ID_MAX = 0xFFFE; + private PDConfig pdConfig; + private PartitionCache cache; + + public PartitionMeta(PDConfig pdConfig) { + super(pdConfig); + this.pdConfig = pdConfig; + // this.timeout 
= pdConfig.getEtcd().getTimeout(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java this.cache = new PartitionCache(); } @@ -66,12 +94,28 @@ private void loadGraphs() throws PDException { } } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java /** * The partition and shard group are stored separately, and when they are init, they need to * be loaded * * @throws PDException */ +======== + public void loadGraph(String graphName) throws PDException { + Metapb.Graph graph = getGraph(graphName); + if (graph != null) { + cache.updateGraph(graph); + loadPartitions(graph); + } + } + + /** + * partition 和 shard group分开存储,再init的时候,需要加载进来 + * + * @throws PDException + */ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java private void loadShardGroups() throws PDException { byte[] shardGroupPrefix = MetadataKeyHelper.getShardGroupPrefix(); for (var shardGroup : scanPrefix(Metapb.ShardGroup.parser(), shardGroupPrefix)) { @@ -82,6 +126,7 @@ private void loadShardGroups() throws PDException { private void loadPartitions(Metapb.Graph graph) throws PDException { byte[] prefix = MetadataKeyHelper.getPartitionPrefix(graph.getGraphName()); List partitions = scanPrefix(Metapb.Partition.parser(), prefix); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java partitions.forEach(p -> { cache.updatePartition(p); }); @@ -89,6 +134,13 @@ private void loadPartitions(Metapb.Graph graph) throws PDException { /** * Find partitions by ID (first from the cache, then from the database) +======== + partitions.forEach(p -> cache.updatePartition(p)); + } + + /** + * 根据id查找分区 (先从缓存找,再到数据库中找) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java * * @param graphName * @param partId @@ -112,6 +164,7 @@ public Metapb.Partition 
getPartitionById(String graphName, int partId) throws PD public List getPartitionById(int partId) throws PDException { List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java cache.getGraphs().forEach(graph -> { cache.getPartitions(graph.getGraphName()).forEach(partition -> { if (partition.getId() == partId) { @@ -119,11 +172,22 @@ public List getPartitionById(int partId) throws PDException { } }); }); +======== + cache.getGraphs().forEach(graph -> cache.getPartitions(graph.getGraphName()).forEach(partition -> { + if (partition.getId() == partId) { + partitions.add(partition); + } + })); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java return partitions; } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java * Find partitions based on code +======== + * 根据code查找分区 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java */ public Metapb.Partition getPartitionByCode(String graphName, long code) throws PDException { var pair = cache.getPartitionByCode(graphName, code); @@ -133,6 +197,7 @@ public Metapb.Partition getPartitionByCode(String graphName, long code) throws P return null; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java public Metapb.Graph getAndCreateGraph(String graphName) throws PDException { return getAndCreateGraph(graphName, pdConfig.getPartition().getTotalCount()); } @@ -161,6 +226,19 @@ public Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) thro /** * Save the partition information +======== + public Metapb.Graph createGraph(String graphName, int partitionCount, int groupId) throws PDException { + return updateGraph(Metapb.Graph.newBuilder() + .setGraphName(graphName) + .setPartitionCount(partitionCount) + 
.setStoreGroupId(groupId) + .setState(Metapb.PartitionState.PState_Normal) + .build()); + } + + /** + * 保存分区信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java * * @param partition * @return @@ -168,14 +246,21 @@ public Metapb.Graph getAndCreateGraph(String graphName, int partitionCount) thro */ public Metapb.Partition updatePartition(Metapb.Partition partition) throws PDException { if (!cache.hasGraph(partition.getGraphName())) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java getAndCreateGraph(partition.getGraphName()); } +======== + throw new PDException(ErrorType.GRAPH_NOT_EXISTS, "Graph " + partition.getGraphName() + " not exist"); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java byte[] key = MetadataKeyHelper.getPartitionKey(partition.getGraphName(), partition.getId()); put(key, partition.toByteString().toByteArray()); cache.updatePartition(partition); return partition; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDException { if (!cache.hasGraph(partition.getGraphName())) { getAndCreateGraph(partition.getGraphName()); @@ -195,6 +280,10 @@ public Metapb.Partition updateShardList(Metapb.Partition partition) throws PDExc /** * Delete all partitions +======== + /** + * 删除所有分区 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java */ public long removeAllPartitions(String graphName) throws PDException { cache.removeAll(graphName); @@ -209,10 +298,17 @@ public long removePartition(String graphName, int id) throws PDException { } public void updatePartitionStats(Metapb.PartitionStats stats) throws PDException { +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java for (String graphName : stats.getGraphNameList()) { byte[] prefix = MetadataKeyHelper.getPartitionStatusKey(graphName, stats.getId()); put(prefix, stats.toByteArray()); } +======== + // for (String graphName : stats.getGraphNameList()) { + byte[] prefix = MetadataKeyHelper.getPartitionStatusKey("", stats.getId()); + put(prefix, stats.toByteArray()); + // } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java } /** @@ -232,7 +328,11 @@ public List getPartitionStats(String graphName) throws PD } /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java * Update the diagram information +======== + * 更新图信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java * * @param graph * @return @@ -240,6 +340,10 @@ public List getPartitionStats(String graphName) throws PD public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { log.info("updateGraph {}", graph); byte[] key = MetadataKeyHelper.getGraphKey(graph.getGraphName()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +======== + // 保存图信息 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java put(key, graph.toByteString().toByteArray()); cache.updateGraph(graph); return graph; @@ -247,10 +351,23 @@ public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { public List getPartitions() { List partitions = new ArrayList<>(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java List graphs = cache.getGraphs(); graphs.forEach(e -> { partitions.addAll(cache.getPartitions(e.getGraphName())); }); +======== + try { + List graphs = cache.getGraphs(); + if (CollectionUtils.isEmpty(graphs)) { + 
loadGraphs(); + graphs = cache.getGraphs(); + } + graphs.forEach(e -> partitions.addAll(cache.getPartitions(e.getGraphName()))); + } catch (PDException e) { + throw new PDRuntimeException(e.getErrorCode(), e); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java return partitions; } @@ -277,7 +394,23 @@ public long removeGraph(String graphName) throws PDException { return l; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java +======== + public long removePartitionStats(String graphName) throws PDException { + byte[] prefix = MetadataKeyHelper.getPartitionStatusPrefixKey(graphName); + return removeByPrefix(prefix); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/PartitionMeta.java public PartitionCache getPartitionCache() { return cache; } + + public void updateShardGroupCache(Metapb.ShardGroup group) { + cache.updateShardGroup(group); + } + + public Map getShardGroupCache() { + return cache.getShardGroups(); + } } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java index 3037d457ba..2c9e38161f 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,10 +17,23 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java import java.util.LinkedList; import java.util.List; import java.util.ListIterator; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.config.PDConfig; @@ -33,6 +47,10 @@ @Slf4j public class StoreInfoMeta extends MetadataRocksDBStore { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +======== + private PDConfig pdConfig; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java public StoreInfoMeta(PDConfig pdConfig) { super(pdConfig); @@ -91,9 +109,18 @@ public Metapb.Store getStore(Long storeId) throws PDException { * @return * @throws PDException */ + @Deprecated public List getStores(String graphName) throws PDException { byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); return scanPrefix(Metapb.Store.parser(), storePrefix); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java +======== + } + + public List getAllStores() throws PDException { + byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); + return scanPrefix(Metapb.Store.parser(), storePrefix); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java } /** @@ -102,11 +129,19 @@ public List getStores(String graphName) throws PDException { * @return * @throws 
PDException */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java public List getActiveStores(String graphName) throws PDException { byte[] activePrefix = MetadataKeyHelper.getActiveStorePrefix(); List listWithTTL = getInstanceListWithTTL(Metapb.Store.parser(), activePrefix); return listWithTTL; +======== + public List getActiveStores(int storeGroupId) throws PDException { + Set storeIds = getStoreIdsByGroup(storeGroupId); + return getActiveStores().stream() + .filter(store -> storeIds.contains(store.getId())) + .collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java } public List getActiveStores() throws PDException { @@ -187,18 +222,51 @@ public Metapb.StoreStats getStoreStats(long storeId) throws PDException { */ public List getStoreStatus(boolean isActive) throws PDException { byte[] storePrefix = MetadataKeyHelper.getStorePrefix(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java List stores = isActive ? getActiveStores() : scanPrefix(Metapb.Store.parser(), storePrefix); +======== + List stores =isActive ? 
getActiveStores() : + scanPrefix(Metapb.Store.parser(), storePrefix); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java LinkedList list = new LinkedList<>(); for (int i = 0; i < stores.size(); i++) { Metapb.Store store = stores.get(i); Metapb.StoreStats stats = getStoreStats(store.getId()); if (stats != null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) .build(); +======== + store = Metapb.Store.newBuilder(store).setStats(getStoreStats(store.getId())) .build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/StoreInfoMeta.java } list.add(store); } return list; } + + public void updateStoreGroup(Long storeId, int storeGroupId) throws PDException { + byte[] groupKey = MetadataKeyHelper.getStoreGroupRelationKey(storeId); + put(groupKey, String.valueOf(storeGroupId).getBytes()); + } + + public Set getStoreIdsByGroup(int storeGroupId) throws PDException { + byte[] groupKey = MetadataKeyHelper.getStoreGroupRelationPrefix(); + return scanPrefix(groupKey).stream() + .filter(g -> Objects.equals(storeGroupId, Integer.parseInt(new String(g.getValue())))) + .map(g -> Long.parseLong(new String(g.getKey()).split("/")[1])) + .collect(Collectors.toSet()); + } + + public int getStoreGroupByStoreId(long storeId) throws PDException { + byte[] storeInfoKey = MetadataKeyHelper.getStoreGroupRelationKey(storeId); + byte[] value = getOne(storeInfoKey); + return value == null ? 
DEFAULT_STORE_GROUP_ID : Integer.parseInt(new String(value)); + } + + public boolean isStoreHasGroup(long storeId) throws PDException { + byte[] groupKey = MetadataKeyHelper.getStoreGroupRelationKey(storeId); + return getOne(groupKey) != null; + } } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java index 5dbda2b097..ef346c5a2e 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -16,6 +17,16 @@ */ package org.apache.hugegraph.pd.meta; +======== +package org.apache.hugegraph.pd.meta; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/meta/TaskInfoMeta.java import java.util.List; @@ -74,7 +85,7 @@ public void removeSplitTaskPrefix(String graphName) throws PDException { public boolean hasSplitTaskDoing() throws PDException { byte[] key = MetadataKeyHelper.getAllSplitTaskPrefix(); - return scanPrefix(key).size() > 0; + return !scanPrefix(key).isEmpty(); } public void addMovePartitionTask(Metapb.Partition partition, MovePartition movePartition) @@ -114,6 +125,21 @@ public List scanMoveTask(String graphName) throws PDException { return scanPrefix(MetaTask.Task.parser(), prefix); } + public List 
scanUserTask(long taskId) throws PDException { + byte[] prefix = MetadataKeyHelper.getUserTaskPrefix(taskId); + return scanPrefix(MetaTask.Task.parser(), prefix); + } + +// public MetaTask.Task getBuildIndexTask(long taskId, int partitionId) throws PDException { +// byte[] key = MetadataKeyHelper.getUserTaskKey(taskId, partitionId); +// return getOne(MetaTask.Task.parser(), key); +// } + + public void updateUserTask(MetaTask.Task task) throws PDException { + byte[] key = MetadataKeyHelper.getUserTaskKey(task.getId(), task.getPartition().getId()); + put(key, task.toByteArray()); + } + /** * Delete the migration task by prefixing it and group them all at once * @@ -127,7 +153,7 @@ public void removeMoveTaskPrefix(String graphName) throws PDException { public boolean hasMoveTaskDoing() throws PDException { byte[] key = MetadataKeyHelper.getAllMoveTaskPrefix(); - return scanPrefix(key).size() > 0; + return !scanPrefix(key).isEmpty(); } } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java index 6afc6d6e94..ca752e7a65 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,15 @@ */ package org.apache.hugegraph.pd.raft; +======== +package org.apache.hugegraph.pd.raft; + +import com.alipay.sofa.jraft.util.BytesUtil; +import com.alipay.sofa.jraft.util.Requires; +import com.caucho.hessian.io.Hessian2Input; +import com.caucho.hessian.io.Hessian2Output; +import lombok.Data; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/KVOperation.java import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java index 8c7398a53a..e5ee055cde 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -18,6 +19,9 @@ package org.apache.hugegraph.pd.raft; import java.util.concurrent.CompletableFuture; +======== +package org.apache.hugegraph.pd.raft; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcClient.java import com.alipay.sofa.jraft.JRaftUtils; import com.alipay.sofa.jraft.Status; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java index ed950a4ee1..212dfefbc9 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -14,6 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +======== +package org.apache.hugegraph.pd.raft; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftRpcProcessor.java package org.apache.hugegraph.pd.raft; diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java index c7537d30a0..f7670f59c7 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. */ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java package org.apache.hugegraph.pd.raft; import java.io.File; @@ -23,11 +26,20 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicLong; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java import java.util.zip.Checksum; import org.apache.commons.io.FileUtils; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Pdpb; +======== +import java.util.concurrent.locks.ReentrantLock; +import java.util.zip.Checksum; + +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.commons.io.FileUtils; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java import org.springframework.util.CollectionUtils; import 
com.alipay.sofa.jraft.Closure; @@ -43,11 +55,23 @@ import com.alipay.sofa.jraft.storage.snapshot.SnapshotWriter; import com.alipay.sofa.jraft.util.CRC64; import com.alipay.sofa.jraft.util.Utils; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +======== +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.service.MetadataService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java import lombok.extern.slf4j.Slf4j; @Slf4j public class RaftStateMachine extends StateMachineAdapter { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java +======== + private List taskHandlers; + private List stateListeners; + private ReentrantLock lock = new ReentrantLock(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java private static final String SNAPSHOT_DIR_NAME = "snapshot"; private static final String SNAPSHOT_ARCHIVE_NAME = "snapshot.zip"; @@ -151,29 +175,20 @@ public void onConfigurationCommitted(final Configuration conf) { @Override public void onSnapshotSave(final SnapshotWriter writer, final Closure done) { - - String snapshotDir = writer.getPath() + File.separator + SNAPSHOT_DIR_NAME; - try { - FileUtils.deleteDirectory(new File(snapshotDir)); - FileUtils.forceMkdir(new File(snapshotDir)); - } catch (IOException e) { - log.error("Failed to create snapshot directory {}", snapshotDir); - done.run(new Status(RaftError.EIO, e.toString())); - return; - } - - CountDownLatch latch = new CountDownLatch(taskHandlers.size()); - for (RaftTaskHandler taskHandler : taskHandlers) { - Utils.runInThread(() -> { + MetadataService.getUninterruptibleJobs().submit(() -> { + lock.lock(); + try { + log.info("start snapshot save"); + String snapshotDir = writer.getPath() + File.separator + 
SNAPSHOT_DIR_NAME; try { - KVOperation op = KVOperation.createSaveSnapshot(snapshotDir); - taskHandler.invoke(op, null); - log.info("Raft onSnapshotSave success"); - latch.countDown(); - } catch (PDException e) { - log.error("Raft onSnapshotSave failed. {}", e.toString()); + FileUtils.deleteDirectory(new File(snapshotDir)); + FileUtils.forceMkdir(new File(snapshotDir)); + } catch (IOException e) { + log.error("Failed to create snapshot directory {}", snapshotDir); done.run(new Status(RaftError.EIO, e.toString())); + return; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java }); } try { @@ -194,6 +209,36 @@ public void onSnapshotSave(final SnapshotWriter writer, final Closure done) { return; } done.run(Status.OK()); +======== + for (RaftTaskHandler taskHandler : taskHandlers) { + try { + KVOperation op = KVOperation.createSaveSnapshot(snapshotDir); + taskHandler.invoke(op, null); + log.info("Raft onSnapshotSave success"); + } catch (PDException e) { + log.error("Raft onSnapshotSave failed. 
{}", e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + } + } + // compress + try { + compressSnapshot(writer); + FileUtils.deleteDirectory(new File(snapshotDir)); + } catch (Exception e) { + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + done.run(new Status(RaftError.EIO, e.toString())); + return; + } + done.run(Status.OK()); + log.info("snapshot save done"); + } catch (Exception e) { + log.error("failed to save snapshot", e); + done.run(new Status(RaftError.EIO, e.toString())); + } finally { + lock.unlock(); + } + }); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java } @Override @@ -202,10 +247,9 @@ public boolean onSnapshotLoad(final SnapshotReader reader) { log.warn("Leader is not supposed to load snapshot"); return false; } - String snapshotDir = reader.getPath() + File.separator + SNAPSHOT_DIR_NAME; - String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; - // 2. decompress snapshot archive + lock.lock(); try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java decompressSnapshot(reader); } catch (PDException e) { log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); @@ -214,23 +258,38 @@ public boolean onSnapshotLoad(final SnapshotReader reader) { CountDownLatch latch = new CountDownLatch(taskHandlers.size()); for (RaftTaskHandler taskHandler : taskHandlers) { +======== + String snapshotDir = reader.getPath() + File.separator + SNAPSHOT_DIR_NAME; + String snapshotArchive = reader.getPath() + File.separator + SNAPSHOT_ARCHIVE_NAME; + // 2. 
decompress snapshot archive +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java try { - KVOperation op = KVOperation.createLoadSnapshot(snapshotDir); - taskHandler.invoke(op, null); - log.info("Raft onSnapshotLoad success"); - latch.countDown(); + decompressSnapshot(reader); } catch (PDException e) { - log.error("Raft onSnapshotLoad failed. {}", e.toString()); + log.error("Failed to delete snapshot directory {}, {}", snapshotDir, e.toString()); + return true; + } + + CountDownLatch latch = new CountDownLatch(taskHandlers.size()); + for (RaftTaskHandler taskHandler : taskHandlers) { + try { + KVOperation op = KVOperation.createLoadSnapshot(snapshotDir); + taskHandler.invoke(op, null); + log.info("Raft onSnapshotLoad success"); + latch.countDown(); + } catch (PDException e) { + log.error("Raft onSnapshotLoad failed. {}", e.toString()); + return false; + } + } + try { + latch.await(); + } catch (InterruptedException e) { + log.error("Raft onSnapshotSave failed. {}", e.toString()); return false; } - } - try { - latch.await(); - } catch (InterruptedException e) { - log.error("Raft onSnapshotSave failed. 
{}", e.toString()); - return false; - } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java try { // TODO: remove file from meta FileUtils.deleteDirectory(new File(snapshotDir)); @@ -241,10 +300,28 @@ public boolean onSnapshotLoad(final SnapshotReader reader) { } catch (IOException e) { log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, snapshotArchive); +======== + + try { + // TODO: remove file from meta + // SnapshotReader 沒有提供刪除文件的接口 + FileUtils.deleteDirectory(new File(snapshotDir)); + // File file = new File(snapshotArchive); + // if (file.exists()) { + // FileUtils.forceDelete(file); + // } + } catch (IOException e) { + log.error("Failed to delete snapshot directory {} and file {}", snapshotDir, snapshotArchive); + return false; + } + return true; + } catch (Exception e) { + log.error("load snapshot with error:", e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java return false; + } finally { + lock.unlock(); } - - return true; } private void compressSnapshot(final SnapshotWriter writer) throws PDException { @@ -256,11 +333,15 @@ private void compressSnapshot(final SnapshotWriter writer) throws PDException { LocalFileMetaOutter.LocalFileMeta.newBuilder(); metaBuild.setChecksum(Long.toHexString(checksum.getValue())); if (!writer.addFile(SNAPSHOT_ARCHIVE_NAME, metaBuild.build())) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, "failed to add file to LocalFileMeta"); +======== + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, "failed to add file to LocalFileMeta"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java } } catch (IOException e) { - throw new 
PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); } } @@ -273,12 +354,16 @@ private void decompressSnapshot(final SnapshotReader reader) throws PDException ZipUtils.decompress(snapshotArchive, new File(reader.getPath()), checksum); if (meta.hasChecksum()) { if (!meta.getChecksum().equals(Long.toHexString(checksum.getValue()))) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, "Snapshot checksum failed"); +======== + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, "Snapshot checksum failed"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/raft/RaftStateMachine.java } } } catch (IOException e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); } } @@ -302,12 +387,12 @@ public void run(Status status) { } @Override - public Pdpb.Error getError() { + public Errors getError() { return null; } @Override - public void setError(Pdpb.Error error) { + public void setError(Errors error) { } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java index bd2e7a9e22..8f6dab6466 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,22 @@ */ package org.apache.hugegraph.pd.store; +======== +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.util.Utils; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import com.google.common.cache.CacheBuilder; +import com.google.common.primitives.Bytes; + +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.io.FileUtils; +import org.rocksdb.*; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java import java.io.File; import java.io.IOException; @@ -91,7 +108,7 @@ public void put(byte[] key, byte[] value) throws PDException { try { db.put(key, value); } catch (RocksDBException e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); } finally { readLock.unlock(); } @@ -104,7 +121,7 @@ public byte[] get(byte[] key) throws PDException { try { return db.get(key); } catch (RocksDBException e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_READ_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_READ_ERROR_VALUE, e); } finally { readLock.unlock(); } @@ -136,7 +153,7 @@ public long remove(byte[] key) throws PDException { try { db.delete(key); } catch (RocksDBException e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_DEL_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_DEL_ERROR_VALUE, e); } finally { readLock.unlock(); } @@ -161,7 +178,7 @@ public long removeByPrefix(byte[] prefix) throws PDException { iterator.next(); } } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); } finally { readLock.unlock(); } @@ -232,7 
+249,7 @@ public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) th } space.put(storeKey, value); } catch (Exception e) { - throw new PDException(Pdpb.ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_WRITE_ERROR_VALUE, e); } } @@ -250,15 +267,20 @@ public void saveSnapshot(String snapshotPath) throws PDException { FileUtils.deleteDirectory(snapshotFile); if (!Utils.atomicMoveFile(tempFile, snapshotFile, true)) { log.error("Fail to rename {} to {}", tempPath, snapshotPath); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, String.format("Fail to rename %s to %s", tempPath, snapshotPath)); +======== + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", tempPath, snapshotPath)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java } } catch (final PDException e) { throw e; } catch (final Exception e) { log.error("Fail to write snapshot at path: {}", snapshotPath, e); - throw new PDException(Pdpb.ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_SAVE_SNAPSHOT_ERROR_VALUE, e); } finally { writeLock.unlock(); } @@ -283,9 +305,14 @@ public void loadSnapshot(String snapshotPath) throws PDException { FileUtils.deleteDirectory(dbFile); if (!Utils.atomicMoveFile(snapshotFile, dbFile, true)) { log.error("Fail to rename {} to {}", snapshotPath, this.dbPath); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, String.format("Fail to rename %s to %s", snapshotPath, this.dbPath)); +======== + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, + String.format("Fail to rename %s to %s", snapshotPath, 
this.dbPath)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/HgKVStoreImpl.java } // reopen the db openRocksDB(this.dbPath); @@ -293,7 +320,7 @@ public void loadSnapshot(String snapshotPath) throws PDException { throw e; } catch (final Exception e) { log.error("failed to load snapshot from {}", snapshotPath); - throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); } finally { writeLock.unlock(); } @@ -327,6 +354,11 @@ public void close() { private void closeRocksDB() { if (this.db != null) { + try { + this.db.syncWal(); + } catch (RocksDBException e) { + log.warn("exception ", e); + } this.db.close(); this.db = null; } @@ -337,7 +369,7 @@ private void openRocksDB(String dbPath) throws PDException { this.db = RocksDB.open(dbOptions, dbPath); } catch (RocksDBException e) { log.error("Failed to open RocksDB from {}", dbPath, e); - throw new PDException(Pdpb.ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); + throw new PDException(ErrorType.ROCKSDB_LOAD_SNAPSHOT_ERROR_VALUE, e); } } } diff --git a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java index b61f07ac1d..5af8d6b226 100644 --- a/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java +++ b/hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,23 @@ */ package org.apache.hugegraph.pd.store; +======== +package org.apache.hugegraph.pd.store; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.Task; +import com.alipay.sofa.jraft.error.RaftError; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.raft.KVOperation; +import org.apache.hugegraph.pd.raft.KVStoreClosure; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.raft.RaftTaskHandler; +import lombok.extern.slf4j.Slf4j; +import org.apache.hugegraph.pd.raft.RaftStateMachine.RaftClosureAdapter; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java import java.nio.ByteBuffer; import java.util.List; @@ -73,8 +91,13 @@ public void put(byte[] key, byte[] value) throws PDException { KVOperation operation = KVOperation.createPut(key, value); try { applyOperation(operation).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } } @@ -96,8 +119,13 @@ public List scanPrefix(byte[] prefix) { public long remove(byte[] bytes) throws PDException { try { applyOperation(KVOperation.createRemove(bytes)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new 
PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } return 0; } @@ -106,8 +134,13 @@ public long remove(byte[] bytes) throws PDException { public long removeByPrefix(byte[] bytes) throws PDException { try { applyOperation(KVOperation.createRemoveByPrefix(bytes)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } return 0; } @@ -116,8 +149,13 @@ public long removeByPrefix(byte[] bytes) throws PDException { public void clear() throws PDException { try { applyOperation(KVOperation.createClear()).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } } @@ -125,8 +163,13 @@ public void clear() throws PDException { public void putWithTTL(byte[] key, byte[] value, long ttl) throws PDException { try { applyOperation(KVOperation.createPutWithTTL(key, value, ttl)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } } @@ -135,8 +178,13 @@ public void putWithTTL(byte[] key, byte[] value, long ttl, TimeUnit timeUnit) th PDException { try { applyOperation(KVOperation.createPutWithTTL(key, value, ttl, timeUnit)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } } @@ -154,8 +202,13 @@ public byte[] getWithTTL(byte[] key) throws PDException { public void removeWithTTL(byte[] key) throws PDException { try { applyOperation(KVOperation.createRemoveWithTTL(key)).get(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } catch (Exception e) { throw new PDException(Pdpb.ErrorType.UNKNOWN_VALUE, e.getMessage()); +======== + } catch (Exception e){ + throw new PDException(ErrorType.UNKNOWN_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java } } @@ -225,8 +278,9 @@ private CompletableFuture applyOperation(final KVOperation op) throws PDE try { final Task task = new Task(); task.setData(ByteBuffer.wrap(op.toByteArray())); - task.setDone(new RaftStateMachine.RaftClosureAdapter(op, new KVStoreClosure() { + task.setDone(new RaftClosureAdapter(op, new KVStoreClosure() { Object data; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java Pdpb.Error error; @Override @@ -245,6 +299,25 @@ public Object getData() { } @Override +======== + Errors error; + @Override + public Errors getError() { + return error; + } + + @Override + public void setError(Errors error) { + this.error = error; + } 
+ + @Override + public Object getData() { + return data; + } + + @Override +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/main/java/org/apache/hugegraph/pd/store/RaftKVStore.java public void setData(Object data) { this.data = data; } @@ -255,13 +328,13 @@ public void run(Status status) { future.complete((T) data); } else { RaftError raftError = status.getRaftError(); - Pdpb.ErrorType type; + ErrorType type; if (RaftError.EPERM.equals(raftError)) { - type = Pdpb.ErrorType.NOT_LEADER; + type = ErrorType.NOT_LEADER; } else { - type = Pdpb.ErrorType.UNKNOWN; + type = ErrorType.UNKNOWN; } - error = Pdpb.Error.newBuilder().setType(type) + error = Errors.newBuilder().setType(type) .setMessage(status.getErrorMsg()) .build(); future.completeExceptionally( diff --git a/hugegraph-pd/hg-pd-dist/pom.xml b/hugegraph-pd/hg-pd-dist/pom.xml index 251ec8bcb7..68986938c9 100644 --- a/hugegraph-pd/hg-pd-dist/pom.xml +++ b/hugegraph-pd/hg-pd-dist/pom.xml @@ -30,7 +30,9 @@ hg-pd-dist - ${project.parent.basedir} + + ${project.parent.basedir}/dist + bash ${project.basedir}/src/assembly ${assembly.dir}/descriptor ${assembly.dir}/static diff --git a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml index a804948703..9f6c695028 100644 --- a/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml +++ b/hugegraph-pd/hg-pd-dist/src/assembly/static/conf/log4j2.xml @@ -26,28 +26,28 @@ - - + + - - + + - + - + @@ -61,20 +61,20 @@ - - + + - + - + @@ -88,22 +88,22 @@ - - + + - + - + - - + + @@ -115,16 +115,20 @@ - + + - + + - + + - + + diff --git a/hugegraph-pd/hg-pd-grpc/pom.xml b/hugegraph-pd/hg-pd-grpc/pom.xml index 7df8622e19..bb7b9966d3 100644 --- a/hugegraph-pd/hg-pd-grpc/pom.xml +++ b/hugegraph-pd/hg-pd-grpc/pom.xml @@ -43,16 +43,49 @@ io.grpc grpc-netty-shaded ${grpc.version} + + + com.google.guava + guava + + io.grpc grpc-protobuf ${grpc.version} + + + com.google.api.grpc + proto-google-common-protos + + + 
com.google.guava + guava + + io.grpc grpc-stub ${grpc.version} + + + com.google.guava + guava + + + + + com.google.guava + guava + 30.1-android + + + com.google.errorprone + error_prone_annotations + + javax.annotation diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/cluster_op.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/cluster_op.proto new file mode 100644 index 0000000000..2622e7fb0a --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/cluster_op.proto @@ -0,0 +1,134 @@ +syntax = "proto3"; + +import "common.proto"; +import "metapb.proto"; +import "metaTask.proto"; + +option java_package = "org.apache.hugegraph.pd.grpc"; + +message ChangePeerListRequest{ + RequestHeader header = 1; + string peer_List = 2; +} +message ChangePeerListResponse{ + ResponseHeader header = 1; +} + +message SplitDataParam{ + // 被分裂的源分区ID + uint32 partition_id = 1; + //目标分区数量 + uint32 count = 2; +} + +enum OperationMode { + Auto = 0; + Expert = 1; +} + +message SplitDataRequest{ + RequestHeader header = 1; + //工作模式 + // Auto:自动分裂,每个Store上分区数达到最大值 + // Expert:专家模式,需要指定splitParams + OperationMode mode = 2; + repeated SplitDataParam param = 3; + uint32 store_group_id = 4; +} + +message SplitGraphDataRequest{ + RequestHeader header = 1; + //工作模式 + string graph_name = 2; + uint32 to_count = 3; +} + +message SplitDataResponse{ + ResponseHeader header = 1; +} + +message MovePartitionParam{ + uint32 partition_id = 1; + uint64 src_store_id = 2; + uint64 dst_store_id = 3; +} + +message MovePartitionRequest{ + RequestHeader header = 1; + //工作模式 + // Auto:自动转移,达到每个Store上分区数量相同 + // Expert:专家模式,需要指定transferParams + OperationMode mode = 2; + repeated MovePartitionParam param = 3; + uint32 store_group_id = 4; +} + +message MovePartitionResponse{ + ResponseHeader header = 1; +} + +message BalanceLeadersRequest{ + RequestHeader header = 1; +} + +message BalanceLeadersResponse{ + ResponseHeader header = 1; +} + +message DbCompactionRequest{ + RequestHeader header = 1; + string tableName = 2; +} 
+ +message DbCompactionResponse{ + ResponseHeader header = 1; +} + +message CombineClusterRequest { + RequestHeader header = 1; + uint32 toCount = 2; + uint32 storeGroupId = 3; +} + +message CombineClusterResponse { + ResponseHeader header = 1; +} + +message CombineGraphRequest { + RequestHeader header = 1; + string graphName = 2; + uint32 toCount = 3; +} + +message CombineGraphResponse { + ResponseHeader header = 1; +} + +message ChangeShardRequest{ + RequestHeader header = 1; + uint32 groupId = 2; + repeated metapb.Shard shards = 3; +} + +message ChangeShardResponse { + ResponseHeader header = 1; +} + +message UpdatePdRaftRequest{ + RequestHeader header = 1; + string config = 3; +} + +message UpdatePdRaftResponse{ + ResponseHeader header = 1; + string message = 2; +} + +message ReportTaskRequest{ + RequestHeader header = 1; + metaTask.Task task = 2; +} + +message ReportTaskResponse{ + ResponseHeader header = 1; +} diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/common.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/common.proto new file mode 100644 index 0000000000..e4cc4b7e93 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/common.proto @@ -0,0 +1,104 @@ +syntax = "proto3"; + +import "google/protobuf/any.proto"; +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.pd.grpc.common"; +option java_outer_classname = "HgPdCommonProto"; + +message RequestHeader { + // 集群 ID. + uint64 cluster_id = 1; + // 发送者 ID. + uint64 sender_id = 2; +} + +message ResponseHeader { + // cluster_id is the ID of the cluster which sent the response. 
+ uint64 cluster_id = 1; + Errors error = 2; +} + +enum ErrorType { + OK = 0; + WARNING = 1; + UNKNOWN = 2; + ALREADY_BOOTSTRAPPED = 4; + INCOMPATIBLE_VERSION = 5; + + // meta not exists + SHARD_GROUPS_NOT_EXISTS = 10; + GRAPH_NOT_EXISTS = 11; + PARTITION_NOT_EXISTS = 12; + STORE_GROUP_NOT_EXISTS = 13; + STORE_NOT_EXISTS = 14; + TASK_NOT_EXISTS = 15; + + GRAPH_ALREADY_EXISTS = 20; + + NOT_LEADER = 100; + STORE_ID_NOT_EXIST = 101; + NO_ACTIVE_STORE = 102; + NOT_FOUND = 103; + PD_UNREACHABLE = 104; + LESS_ACTIVE_STORE = 105; + STORE_HAS_BEEN_REMOVED = 106; + + // license 错误 + LICENSE_ERROR= 107; + // license 认证错误 + LICENSE_VERIFY_ERROR= 108; + + STORE_PROHIBIT_DELETION = 111; + SET_CONFIG_SHARD_COUNT_ERROR = 112; + UPDATE_STORE_STATE_ERROR = 113; + STORE_PROHIBIT_DUPLICATE = 114; + STORE_TOMBSTONE = 203; + + ETCD_READ_ERROR = 1000; + ETCD_WRITE_ERROR = 1001; + + ROCKSDB_READ_ERROR = 1002; + ROCKSDB_WRITE_ERROR = 1003; + ROCKSDB_DEL_ERROR = 1004; + ROCKSDB_SAVE_SNAPSHOT_ERROR = 1005; + ROCKSDB_LOAD_SNAPSHOT_ERROR = 1006; + + // 当前集群状态禁止分裂 + Cluster_State_Forbid_Splitting = 1007; + // 正在分裂中 + Split_Partition_Doing = 1008; + // store上分区数量超过上限 + Too_Many_Partitions_Per_Store = 1009; + //分区下线正在进行 + Store_Tombstone_Doing = 1010; + // 不合法的分裂个数 + Invalid_Split_Partition_Count = 1011; + Invalid_Combine_Partition_Count = 1012; + Combine_Partition_Doing = 1013; + Invalid_Partition_count = 1014; + ERROR = 9999; + // PD 状态 + PD_UNAVAILABLE = 10000; + PD_NOT_LEADER = 10001; + PD_RAFT_NOT_READY = 10002; + PD_UNAUTHENTICATED = 10003; + // Store 状态 + STORE_UNAVAILABLE = 20000; + STORE_NOT_LEADER = 20001; + // Client 状态 + CLIENT_INVALID_PARAMETER = 30000; +} + +message Errors { + ErrorType type = 1; + string message = 2; + google.protobuf.Any data = 3; +} + +message NoArg{ + RequestHeader header = 1; +} + +message VoidResponse{ + ResponseHeader header = 1; +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto 
b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto index 22007cda31..23f73030dd 100644 --- a/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto @@ -17,8 +17,7 @@ syntax = "proto3"; package kv; -import "pdpb.proto"; -import "metapb.proto"; +import "common.proto"; option java_package = "org.apache.hugegraph.pd.grpc.kv"; option java_multiple_files = true; @@ -43,37 +42,37 @@ service KvService { /* requests */ message Kv { - pdpb.RequestHeader header = 1; + RequestHeader header = 1; string key = 2; string value = 3; } message KvResponse { - pdpb.ResponseHeader header = 1; + ResponseHeader header = 1; } message K{ - pdpb.RequestHeader header = 1; + RequestHeader header = 1; string key = 2; } message KResponse{ - pdpb.ResponseHeader header = 1; + ResponseHeader header = 1; string value = 2; } message ScanPrefixResponse { - pdpb.ResponseHeader header = 1; - map kvs = 2; + ResponseHeader header = 1; + map kvs = 2; } message LockRequest{ - pdpb.RequestHeader header = 1; + RequestHeader header = 1; string key = 2; int64 ttl = 3; int64 clientId = 4; } message LockResponse{ - pdpb.ResponseHeader header = 1; + ResponseHeader header = 1; string key = 2; int64 ttl = 3; int64 clientId = 4; @@ -81,7 +80,7 @@ message LockResponse{ } message LockAliveResponse{ - pdpb.ResponseHeader header = 1; + ResponseHeader header = 1; int64 clientId = 2; } @@ -104,8 +103,8 @@ message WatchEvent { } message WatchResponse { - pdpb.ResponseHeader header = 1; - repeated WatchEvent events = 2; + ResponseHeader header = 1; + repeated WatchEvent events= 2; int64 clientId = 3; WatchState state = 4; } @@ -118,8 +117,8 @@ enum WatchState { } message WatchRequest { - pdpb.RequestHeader header = 1; - WatchState state = 2; + RequestHeader header = 1; + WatchState state= 2; string key = 3; int64 clientId = 4; } @@ -131,13 +130,13 @@ message V{ } message TTLRequest{ - pdpb.RequestHeader header = 1; + RequestHeader header = 1; string key = 2; string value = 3; 
int64 ttl = 4; } message TTLResponse{ - pdpb.ResponseHeader header = 1; + ResponseHeader header = 1; bool succeed = 2; } diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/meta.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/meta.proto new file mode 100644 index 0000000000..72e4683728 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/meta.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package meta; +option java_package = "org.apache.hugegraph.pd.grpc"; +import "google/protobuf/any.proto"; +import "metapb.proto"; +import "common.proto"; +option java_multiple_files = true; + +service MetaService{ + rpc getStores(NoArg) returns(Stores); + rpc getPartitions(NoArg) returns(Partitions); + rpc getShardGroups(NoArg) returns(ShardGroups); + rpc getGraphSpaces(NoArg) returns(GraphSpaces); + rpc getGraphs(NoArg) returns(Graphs); + rpc updateStore(metapb.Store) returns(VoidResponse); + rpc updatePartition(metapb.Partition) returns(VoidResponse); + rpc updateShardGroup(metapb.ShardGroup) returns(VoidResponse); + rpc updateGraphSpace(metapb.GraphSpace) returns(VoidResponse); + rpc updateGraph(metapb.Graph) returns(VoidResponse); + rpc updatePeers(NoArg) returns(VoidResponse); +} +message Stores{ + ResponseHeader header = 1; + repeated metapb.Store data = 2; +} +message Partitions{ + ResponseHeader header = 1; + repeated metapb.Partition data = 2; +} +message ShardGroups{ + ResponseHeader header = 1; + repeated metapb.ShardGroup data = 2; +} +message Shards{ + ResponseHeader header = 1; + repeated metapb.Shard data = 2; +} +message GraphSpaces{ + ResponseHeader header = 1; + repeated metapb.GraphSpace data = 2; +} +message Graphs{ + ResponseHeader header = 1; + repeated metapb.Graph data = 2; +} + +message DefaultResponse{ + ResponseHeader header = 1; + repeated google.protobuf.Any data = 2; +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto index f7754824ec..8ea09fad59 100644 --- 
a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto @@ -18,8 +18,11 @@ syntax = "proto3"; package pdpb; +import "common.proto"; import "metapb.proto"; import "metaTask.proto"; +import "store_group.proto"; +import "cluster_op.proto"; option java_package = "org.apache.hugegraph.pd.grpc"; @@ -48,10 +51,12 @@ service PD { rpc DelPartition(DelPartitionRequest) returns (DelPartitionResponse) {} // Query partition information based on conditions, including Store, Graph and other conditions rpc QueryPartitions(QueryPartitionsRequest) returns (QueryPartitionsResponse){} + + rpc CreateGraph(CreateGraphRequest) returns (CreateGraphResponse){} // Read graph information rpc GetGraph(GetGraphRequest) returns (GetGraphResponse){} // Modify graph information - rpc SetGraph(SetGraphRequest) returns (SetGraphResponse){} + rpc SetGraph(CreateGraphRequest) returns (CreateGraphResponse){} rpc DelGraph(DelGraphRequest) returns (DelGraphResponse){} // Global unique incremental ID rpc GetId(GetIdRequest) returns (GetIdResponse){} @@ -65,17 +70,30 @@ service PD { rpc SetGraphSpace(SetGraphSpaceRequest) returns (SetGraphSpaceResponse){} // Get cluster health status rpc GetClusterStats(GetClusterStatsRequest) returns (GetClusterStatsResponse){} + rpc GetMembersAndClusterState(GetMembersRequest) returns (MembersAndClusterState) {} // Replace PD cluster nodes rpc ChangePeerList(ChangePeerListRequest) returns (getChangePeerListResponse) {} // Data splitting rpc SplitData(SplitDataRequest) returns (SplitDataResponse){} - rpc SplitGraphData(SplitGraphDataRequest) returns (SplitDataResponse) {} + // change shard + rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {} // Data migration rpc MovePartition(MovePartitionRequest) returns (MovePartitionResponse){} + // 合并分区 + rpc CombineCluster(CombineClusterRequest) returns (CombineClusterResponse){} + rpc CombineGraph(CombineGraphRequest) returns (CombineGraphResponse) {} + 
//平衡store中分区leader的数量 + rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){} + // 通知rocksdb进行compaction + rpc DbCompaction(DbCompactionRequest) returns (DbCompactionResponse){} // Report partition splitting and other task execution results rpc ReportTask(ReportTaskRequest) returns (ReportTaskResponse){} + // 更新pd raft + rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse) {} + // shard group 运维相关的处理 + rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){} rpc GetPartitionStats(GetPartitionStatsRequest) returns (GetPartitionStatsResponse){} // Balance the number of partition leaders in the store rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){} @@ -103,8 +121,29 @@ service PD { // Update pd raft rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse) {} + // cache rpc getCache(GetGraphRequest) returns (CacheResponse) {} rpc getPartitions(GetGraphRequest) returns (CachePartitionResponse) {} + rpc getGraphStats(GetGraphRequest) returns (GraphStatsResponse) {} + + // 索引创建下沉, 提交重建索引任务, 查询, 重试 + rpc submitIndexTask(IndexTaskCreateRequest) returns (TaskQueryResponse) {} + rpc submitBackupGraphTask(BackupGraphRequest) returns (TaskQueryResponse) {} + + // task 的通用接口,包含索引创建下沉,图备份等 + rpc queryTaskState(TaskQueryRequest) returns (TaskQueryResponse) {} + rpc retryTask(TaskQueryRequest) returns (TaskQueryResponse){} + + // store group + rpc createStoreGroup(storeGroup.CreateStoreGroupRequest) returns (storeGroup.CreateStoreGroupResponse){} + rpc getStoreGroup(storeGroup.GetStoreGroupRequest) returns (storeGroup.GetStoreGroupResponse){} + rpc getAllStoreGroup(storeGroup.GetAllStoreGroupRequest) returns (storeGroup.GetAllStoreGroupResponse){} + rpc updateStoreGroup(storeGroup.UpdateStoreGroupRequest) returns (storeGroup.UpdateStoreGroupResponse){} + rpc getStoresByStoreGroup(storeGroup.GetGroupStoresRequest) returns (storeGroup.GetGroupStoresResponse){} + rpc 
updateStoreGroupRelation(storeGroup.UpdateStoreGroupRelationRequest) returns (storeGroup.UpdateStoreGroupRelationResponse){} + rpc getLeaderGrpcAddress(NoArg) returns (GetLeaderGrpcAddressResponse){} + rpc clearGrpcAddressCache(NoArg) returns (VoidResponse){} + rpc getAllGrpcAddresses(NoArg) returns (GetAllGrpcAddressesResponse){} } message RequestHeader { @@ -161,6 +200,7 @@ message Error { ErrorType type = 1; string message = 2; } + message GetStoreRequest { RequestHeader header = 1; uint64 store_id = 2; @@ -188,7 +228,6 @@ message RegisterStoreRequest { metapb.Store store = 2; } - message RegisterStoreResponse { ResponseHeader header = 1; // Upon initial registration, returns the new store_id @@ -220,10 +259,8 @@ message GetAllStoresResponse { repeated metapb.Store stores = 2; } - message StoreHeartbeatRequest { RequestHeader header = 1; - metapb.StoreStats stats = 2; } @@ -239,14 +276,12 @@ message GetPartitionRequest { bytes key = 3; } - message GetPartitionByCodeRequest { RequestHeader header = 1; string graph_name = 2; uint64 code = 3; } - message GetPartitionResponse { ResponseHeader header = 1; metapb.Partition partition = 2; @@ -289,15 +324,11 @@ message ScanPartitionsRequest { bytes end_key = 4; // end_key is +inf when it is empty. 
} - - message ScanPartitionsResponse { ResponseHeader header = 1; repeated metapb.PartitionShard partitions = 4; } - - message QueryPartitionsRequest{ RequestHeader header = 1; metapb.PartitionQuery query = 2; @@ -308,8 +339,6 @@ message QueryPartitionsResponse { repeated metapb.Partition partitions = 4; } - - message GetGraphRequest{ RequestHeader header = 1; string graph_name = 2; @@ -320,12 +349,12 @@ message GetGraphResponse{ metapb.Graph graph = 2; } -message SetGraphRequest{ +message CreateGraphRequest{ RequestHeader header = 1; metapb.Graph graph = 2; } -message SetGraphResponse{ +message CreateGraphResponse{ ResponseHeader header = 1; metapb.Graph graph = 2; } @@ -372,6 +401,13 @@ message GetMembersResponse{ metapb.Member leader = 3; } +message MembersAndClusterState{ + ResponseHeader header = 1; + repeated metapb.Member members = 2; + metapb.Member leader = 3; + repeated metapb.GroupClusterState state = 4; +} + message GetPDConfigRequest{ RequestHeader header = 1; uint64 version = 2 ; @@ -391,7 +427,6 @@ message SetPDConfigResponse{ ResponseHeader header = 1; } - message GetGraphSpaceRequest{ RequestHeader header = 1; string graph_Space_Name = 2; @@ -413,6 +448,8 @@ message SetGraphSpaceResponse{ message GetClusterStatsRequest{ RequestHeader header = 1; + int64 storeId = 2; + int32 storeGroup = 3; } message GetClusterStatsResponse{ @@ -516,70 +553,77 @@ message PutLicenseResponse{ ResponseHeader header = 1; } -message DbCompactionRequest{ +message DeleteShardGroupRequest { RequestHeader header = 1; - string tableName = 2; + uint32 groupId = 2; } -message DbCompactionResponse{ +message DeleteShardGroupResponse { ResponseHeader header = 1; } -message CombineClusterRequest { +message GetShardGroupRequest{ RequestHeader header = 1; - uint32 toCount = 2; + uint32 group_id = 2 ; } -message CombineClusterResponse { +message GetShardGroupResponse{ ResponseHeader header = 1; + metapb.ShardGroup shardGroup = 2; } -message CombineGraphRequest { +message 
UpdateShardGroupRequest{ RequestHeader header = 1; - string graphName = 2; - uint32 toCount = 3; + metapb.ShardGroup shardGroup = 2; } -message CombineGraphResponse { +message UpdateShardGroupResponse{ ResponseHeader header = 1; } -message DeleteShardGroupRequest { - RequestHeader header = 1; - uint32 groupId = 2; +message CacheResponse { + ResponseHeader header = 1; + // 返回修改后的Store + repeated metapb.Store stores = 2; + repeated metapb.ShardGroup shards = 3; + repeated metapb.Graph graphs = 4; } - -message DeleteShardGroupResponse { +message CachePartitionResponse { ResponseHeader header = 1; + repeated metapb.Partition partitions = 2; } -message GetShardGroupRequest{ +message IndexTaskCreateRequest { RequestHeader header = 1; - uint32 group_id = 2 ; + metapb.BuildIndexParam param = 2; } -message GetShardGroupResponse{ - ResponseHeader header = 1; - metapb.ShardGroup shardGroup = 2; +message BackupGraphRequest { + RequestHeader header = 1; + string graph_name = 2; + string target_graph_name = 3; } -message UpdateShardGroupRequest{ +message TaskQueryRequest { RequestHeader header = 1; - metapb.ShardGroup shardGroup = 2; + uint64 task_id = 2; } -message UpdateShardGroupResponse{ +message TaskQueryResponse{ ResponseHeader header = 1; + metaTask.TaskState state = 2; + string message = 3; + uint64 task_id = 4; } -message ChangeShardRequest{ - RequestHeader header = 1; - uint32 groupId = 2; - repeated metapb.Shard shards = 3; +message GraphStatsResponse { + ResponseHeader header = 1; + metapb.GraphStats stats = 2; } -message ChangeShardResponse { +message GetLeaderGrpcAddressResponse { ResponseHeader header = 1; + string address = 2; } message UpdatePdRaftRequest{ @@ -602,3 +646,9 @@ message CachePartitionResponse { ResponseHeader header = 1; repeated metapb.Partition partitions = 2; } + +message GetAllGrpcAddressesResponse { + ResponseHeader header = 1; + bool allowed = 2; + repeated string addresses = 3; +} diff --git 
a/hugegraph-pd/hg-pd-grpc/src/main/proto/pulse.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pulse.proto new file mode 100644 index 0000000000..e82f090a0d --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pulse.proto @@ -0,0 +1,226 @@ +syntax = "proto3"; + +import "metapb.proto"; +import "common.proto"; + +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.pd.grpc.pulse"; +option java_outer_classname = "HgPdPulseProto"; + +service HgPdPulse { + rpc Pulse (stream PulseRequest) returns (stream PulseResponse); +} + +/* requests */ +message PulseRequest { + PulseCreateRequest create_request = 1; + PulseCancelRequest cancel_request = 2; + PulseNoticeRequest notice_request = 3; + PulseAckRequest ack_request = 4; +} + +message PulseCreateRequest { + PulseType pulse_type = 1; + int64 observer_id = 2; +} + +message PulseCancelRequest { + int64 observer_id = 1; +} + +message PulseNoticeRequest { + int64 observer_id = 1; + oneof request_union { + PartitionHeartbeatRequest partition_heartbeat_request = 10; + PdInstructionRequest pd_instruction_request = 11; + PulsePartitionRequest partition_request = 12; + PulseNodeRequest node_request = 13; + PulseGraphRequest graph_request = 14; + PulseShardGroupRequest shard_group_request = 15; + } +} + +message PulseAckRequest { + int64 observer_id = 1; + int64 notice_id = 2; +} + +// 分区心跳,分区的peer增减、leader改变等事件发生时,由leader发送心跳。 +// 同时pd对分区进行shard增减通过Response发送给leader +message PartitionHeartbeatRequest { + RequestHeader header = 1; + // Leader Peer sending the heartbeat + metapb.PartitionStats states = 4; +} + +// placeholder +message PdInstructionRequest { + RequestHeader header = 1; +} +// placeholder +message PulsePartitionRequest { + RequestHeader header = 1; +} +// placeholder +message PulseNodeRequest { + RequestHeader header = 1; +} +// placeholder +message PulseGraphRequest { + RequestHeader header = 1; +} +// placeholder +message PulseShardGroupRequest { + RequestHeader header = 1; +} + +/* 
responses */ +message PulseResponse { + PulseType pulse_type = 1; + int64 observer_id = 2; + int32 status = 3; //0=ok,1=fail + int64 notice_id = 4; + string origin_id = 5; + oneof response_union { + PartitionHeartbeatResponse partition_heartbeat_response = 10; + PdInstructionResponse instruction_response = 11; + PulsePartitionResponse partition_response = 12; + PulseNodeResponse node_response = 13; + PulseGraphResponse graph_response = 14; + PulseShardGroupResponse shard_group_response = 15; + } +} + +message PartitionHeartbeatResponse { + ResponseHeader header = 1; + uint64 id = 3; + metapb.Partition partition = 2; + ChangeShard change_shard = 4; + + TransferLeader transfer_leader = 5; + // 拆分成多个分区,第一个SplitPartition是原分区,从第二开始是新分区 + SplitPartition split_partition = 6; + // rocksdb compaction 指定的表,null是针对所有 + DbCompaction db_compaction = 7; + // 将partition的数据,迁移到 target + MovePartition move_partition = 8; + // 清理partition的graph的数据 + CleanPartition clean_partition = 9; + // partition key range 变化 + PartitionKeyRange key_range = 10; + // 创建索引的任务 + metapb.BuildIndex build_index = 11; +} +message PulsePartitionResponse { + string graph = 1; + int32 partition_id = 2; + PulseChangeType change_type = 3; +} + +message PulseNodeResponse { + string graph = 1; + uint64 node_id = 2; + StoreNodeEventType node_event_type = 3; +} + +message PulseGraphResponse { + metapb.Graph graph = 1; + PulseType type = 2;//? for what? 
+} + +message PulseShardGroupResponse { + metapb.ShardGroup shard_group = 1; + PulseChangeType type = 2; + int32 shard_group_id = 3; +} + +/* Date model */ +message ChangeShard { + repeated metapb.Shard shard = 1; + ConfChangeType change_type = 2; +} + +message TransferLeader { + metapb.Shard shard = 1; +} + +message SplitPartition { + repeated metapb.Partition new_partition = 1; +} + +message DbCompaction { + string table_name = 3; +} + +message MovePartition { + // target partition的key range为,迁移后的新range + metapb.Partition target_partition = 1; + // partition 的 key start 和 key end的所有数据, + // 会迁移到 target partition 上 + uint64 key_start = 2; + uint64 key_end = 3; +} + +message CleanPartition { + uint64 key_start = 1; + uint64 key_end = 2; + CleanType clean_type = 3; + bool delete_partition = 4; //是否删除分区 +} + +message PartitionKeyRange { + uint32 partition_id = 1; + uint64 key_start = 2; + uint64 key_end = 3; +} + +message PdInstructionResponse { + PdInstructionType instruction_type = 1; + string leader_ip = 2; + repeated string peers = 3; +} + +/* enums */ +enum PulseType { + PULSE_TYPE_UNKNOWN = 0; + PULSE_TYPE_PARTITION_HEARTBEAT = 1; + PULSE_TYPE_PD_INSTRUCTION = 2; + PULSE_TYPE_PARTITION_CHANGE = 3; + PULSE_TYPE_STORE_NODE_CHANGE = 4; + PULSE_TYPE_GRAPH_CHANGE = 5; + PULSE_TYPE_SHARD_GROUP_CHANGE = 6; +} + +enum PulseChangeType { + PULSE_CHANGE_TYPE_UNKNOWN = 0; + PULSE_CHANGE_TYPE_ADD = 1; + PULSE_CHANGE_TYPE_ALTER = 2; + PULSE_CHANGE_TYPE_DEL = 3; + PULSE_CHANGE_TYPE_SPECIAL1 = 4; +} + +enum ConfChangeType { + CONF_CHANGE_TYPE_UNKNOWN = 0; + CONF_CHANGE_TYPE_ADD_NODE = 1; + CONF_CHANGE_TYPE_REMOVE_NODE = 2; + CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3; + CONF_CHANGE_TYPE_ADJUST = 4; // 调整shard,leader根据新的配置动态增减。 +} + +enum CleanType { + CLEAN_TYPE_KEEP_RANGE = 0; // 仅保留这个range + CLEAN_TYPE_EXCLUDE_RANGE = 1; // 删除这个range +} + +enum PdInstructionType { + CHANGE_TO_FOLLOWER = 0; + CHANGE_PEERS = 1; +} +enum StoreNodeEventType { + STORE_NODE_EVENT_TYPE_UNKNOWN = 0; + 
STORE_NODE_EVENT_TYPE_NODE_ONLINE = 1; + STORE_NODE_EVENT_TYPE_NODE_OFFLINE = 2; + STORE_NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3; + // pd leader 变更 + STORE_NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4; +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/store_group.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/store_group.proto new file mode 100644 index 0000000000..a5efa71f25 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/store_group.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; +package storeGroup; + +import "common.proto"; +import "metapb.proto"; + +option java_package = "org.apache.hugegraph.pd.grpc"; + +message CreateStoreGroupRequest { + RequestHeader header = 1; + uint32 groupId = 2; + string name = 3; + uint32 partition_count = 4; +} + +message CreateStoreGroupResponse { + ResponseHeader header = 1; + metapb.StoreGroup storeGroup = 2; +} + +message GetStoreGroupRequest { + RequestHeader header = 1; + uint32 groupId = 2; +} + +message GetStoreGroupResponse { + ResponseHeader header = 1; + metapb.StoreGroup storeGroup = 2; +} + +message GetAllStoreGroupRequest { + RequestHeader header = 1; + uint32 groupId = 2; +} + +message GetAllStoreGroupResponse { + ResponseHeader header = 1; + repeated metapb.StoreGroup storeGroups = 2; +} + +message UpdateStoreGroupRequest { + RequestHeader header = 1; + uint32 groupId = 2; + string name = 3; +} + +message UpdateStoreGroupResponse { + ResponseHeader header = 1; + metapb.StoreGroup storeGroup = 2; +} + +message GetGroupStoresRequest { + RequestHeader header = 1; + uint32 store_groupId = 2; +} + +message GetGroupStoresResponse { + ResponseHeader header = 1; + repeated metapb.Store stores = 2; +} + +message UpdateStoreGroupRelationRequest { + RequestHeader header = 1; + uint64 storeId = 2; + uint32 store_groupId = 3; +} + +message UpdateStoreGroupRelationResponse { + ResponseHeader header = 1; + bool success = 2; + string message = 3; +} \ No newline at end of file diff --git 
a/hugegraph-pd/hg-pd-grpc/src/main/proto/watch.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/watch.proto new file mode 100644 index 0000000000..5c8882cc43 --- /dev/null +++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/watch.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +import "metapb.proto"; + +option java_multiple_files = true; +option java_package = "org.apache.hugegraph.pd.grpc.watch"; +option java_outer_classname = "HgPdWatchProto"; + +service HgPdWatch { + rpc Watch(stream WatchRequest) returns (stream WatchResponse); +} + +message WatchRequest { + WatchCreateRequest create_request = 1; + WatchCancelRequest cancel_request = 2; +} + +message WatchCreateRequest { + WatchType watch_type = 1; +} + +message WatchCancelRequest { + int64 watcher_id = 1; +} + +message WatchResponse { + WatchType watch_type = 1; + int64 watcher_id = 2; + int32 status = 3; //0=ok,1=fail + int64 notice_id = 4; + string msg = 5; + oneof response_union { + WatchPartitionResponse partition_response = 10; + WatchNodeResponse node_response = 11; + WatchGraphResponse graph_response = 12; + WatchShardGroupResponse shard_group_response = 13; + } +} + +message WatchPartitionResponse { + string graph = 1; + int32 partition_id = 2; + WatchChangeType change_type = 3; +} + +message WatchNodeResponse { + string graph = 1; + uint64 node_id = 2; + NodeEventType node_event_type = 3; +} + +message WatchGraphResponse { + metapb.Graph graph = 1; + WatchType type = 2; +} + +message WatchShardGroupResponse { + metapb.ShardGroup shard_group = 1; + WatchChangeType type = 2; + int32 shard_group_id = 3; +} + +enum WatchType { + WATCH_TYPE_UNKNOWN = 0; + WATCH_TYPE_PARTITION_CHANGE = 1; + WATCH_TYPE_STORE_NODE_CHANGE = 2; + WATCH_TYPE_GRAPH_CHANGE = 3; + WATCH_TYPE_SHARD_GROUP_CHANGE = 4; +} + +enum WatchChangeType { + WATCH_CHANGE_TYPE_UNKNOWN = 0; + WATCH_CHANGE_TYPE_ADD = 1; + WATCH_CHANGE_TYPE_ALTER = 2; + WATCH_CHANGE_TYPE_DEL = 3; + WATCH_CHANGE_TYPE_SPECIAL1 = 4; +} + +enum NodeEventType { + 
NODE_EVENT_TYPE_UNKNOWN = 0; + NODE_EVENT_TYPE_NODE_ONLINE = 1; + NODE_EVENT_TYPE_NODE_OFFLINE = 2; + NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3; + // pd leader 变更 + NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4; +} \ No newline at end of file diff --git a/hugegraph-pd/hg-pd-service/pom.xml b/hugegraph-pd/hg-pd-service/pom.xml index 37c90fe869..d9ae2e721d 100644 --- a/hugegraph-pd/hg-pd-service/pom.xml +++ b/hugegraph-pd/hg-pd-service/pom.xml @@ -44,6 +44,69 @@ + + org.apache.hugegraph + hugegraph-struct + ${hugegraph.struct.version} + + + org.apache.lucene + * + + + com.hankcs + * + + + io.jsonwebtoken + * + + + org.eclipse.collections + * + + + org.apdplat + * + + + org.ansj + * + + + com.janeluo + * + + + org.apache.tinkerpop + * + + + it.unimi.dsi + * + + + org.lz4 + * + + + com.huaban + * + + + org.lionsoul + * + + + com.chenlb.mmseg4j + * + + + org.apache.hugegraph + hg-pd-client + + + org.apache.hugegraph @@ -106,6 +169,11 @@ + + org.springframework.security + spring-security-core + 5.8.3 + com.lmax disruptor @@ -120,6 +188,17 @@ com.google.protobuf protobuf-java-util 3.17.2 + + + com.google.errorprone + error_prone_annotations + + + + + de.schlichtherle.truelicense + truelicense-core + 1.33 org.apache.hugegraph diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java index 9ed16f962a..8c8c4d9b96 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. 
*/ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java package org.apache.hugegraph.pd.model; import java.util.HashMap; @@ -22,12 +25,20 @@ import java.util.Map; import java.util.Set; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java public class PromTargetsModel { +======== +/** + * @author lynn.bond@hotmail.com on 2022/2/14 + */ +public class SDConfig { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java private static final String LABEL_METRICS_PATH = "__metrics_path__"; private static final String LABEL_SCHEME = "__scheme__"; private static final String LABEL_JOB_NAME = "job"; private static final String LABEL_CLUSTER = "cluster"; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java private final Map labels = new HashMap<>(); private Set targets = new HashSet<>(); @@ -36,13 +47,28 @@ private PromTargetsModel() { public static PromTargetsModel of() { return new PromTargetsModel(); +======== + + private Set targets = new HashSet<>(); + private Map labels = new HashMap<>(); + + private SDConfig() { + } + + public static SDConfig of() { + return new SDConfig(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java } public Set getTargets() { return targets; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java public PromTargetsModel setTargets(Set targets) { +======== + public SDConfig setTargets(Set targets) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java if (targets != null) { this.targets = targets; } @@ -53,14 +79,20 @@ public Map getLabels() { return labels; } +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java public PromTargetsModel addTarget(String target) { if (target == null) { return this; } +======== + public SDConfig addTarget(String target) { + if (target == null) return this; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java this.targets.add(target); return this; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java public PromTargetsModel setMetricsPath(String path) { return this.addLabel(LABEL_METRICS_PATH, path); } @@ -77,13 +109,33 @@ public PromTargetsModel addLabel(String label, String value) { if (label == null || value == null) { return this; } +======== + public SDConfig setMetricsPath(String path) { + return this.addLabel(LABEL_METRICS_PATH, path); + } + + public SDConfig setScheme(String scheme) { + return this.addLabel(LABEL_SCHEME, scheme); + } + + public SDConfig setClusterId(String clusterId) { + return this.addLabel(LABEL_CLUSTER, clusterId); + } + + public SDConfig addLabel(String label, String value) { + if (label == null || value == null) return this; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java this.labels.put(label, value); return this; } @Override public String toString() { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/PromTargetsModel.java return "PromTargetModel{" + +======== + return "SDConfig{" + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/model/SDConfig.java "targets=" + targets + ", labels=" + labels + '}'; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java index 9644e78c19..3a4fc9254d 100644 --- 
a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -61,26 +62,85 @@ public NoticeBroadcaster notifying() { if (this.state >= 2) { log.warn("Aborted notifying as ack has done. notice: {}", this); +======== +package org.apache.hugegraph.pd.notice; + +import org.apache.hugegraph.pd.common.HgAssert; +import lombok.extern.slf4j.Slf4j; + +/** + * @author lynn.bond@hotmail.com on 2022/2/10 + * @version 2.0 added the NoticeDeliver on 2023/11/29 + */ +@Slf4j +public class NoticeBroadcaster { + private final NoticeDeliver noticeDeliver; + private long noticeId; + private String durableId; + private int state; // 0=ready; 1=notified; 2=done ack; 10=invalid, -1=error + private int counter; + private long timestamp; + + public static NoticeBroadcaster of(NoticeDeliver noticeDeliver) { + HgAssert.isArgumentNotNull(noticeDeliver, "noticeDeliver"); + return new NoticeBroadcaster(noticeDeliver); + } + + private NoticeBroadcaster(NoticeDeliver noticeDeliver) { + this.noticeDeliver = noticeDeliver; + this.timestamp = System.currentTimeMillis(); + } + + public NoticeBroadcaster notifying() { + try { + if (!this.noticeDeliver.isDuty()) { + this.state = 10; + log.warn("Notification aborted due to not in duty state. notice: {}", this.getNoticeString()); + return this; + } + } catch (Throwable t) { + log.error("Failed to invoke `NoticeDeliver::isDuty`, but continuing the the notification, caused by:", t); + } + + if (this.state >= 2) { + log.warn("Notification aborted as acknowledgment has been received. 
notice: {}", this); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java return this; } this.counter++; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java if (this.durableId == null && this.durableSupplier != null) { try { this.durableId = this.durableSupplier.get(); } catch (Throwable t) { log.error("Failed to invoke durableSupplier, cause by:", t); +======== + if (this.durableId == null) { + try { + this.durableId = this.noticeDeliver.save(); + } catch (Throwable t) { + log.error("Failed to invoke `NoticeDeliver::save`, caused by:", t); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java } } try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java this.noticeId = this.noticeSupplier.get(); state = 1; } catch (Throwable t) { state = -1; log.error("Failed to invoke noticeSupplier: {}; cause by: " + this.noticeSupplier.toString(), t); +======== + this.noticeId = this.noticeDeliver.send(this.durableId); + state = 1; + } catch (Throwable t) { + state = -1; + log.error("Failed to invoke `NoticeDeliver::send`, notice: {}, caused by: " + this.noticeDeliver.toNoticeString(), t); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java } return this; @@ -102,6 +162,7 @@ public boolean checkAck(long ackNoticeId) { } public boolean doRemoveDurable() { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java log.info("Removing NoticeBroadcaster is stating, noticeId:{}, durableId: {}" , this.noticeId, this.durableId); boolean flag = false; @@ -110,6 +171,12 @@ public boolean doRemoveDurable() { log.warn("The remove-function hasn't been set."); return false; } +======== + log.info("NoticeBroadcaster is being 
removed, noticeId:{}, durableId: {}" + , this.noticeId, this.durableId); + boolean flag = false; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java if (this.durableId == null) { log.warn("The durableId hasn't been set."); @@ -117,6 +184,7 @@ public boolean doRemoveDurable() { } try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java if (!(flag = this.removeFunction.apply(this.durableId))) { log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}" , this.noticeId, this.durableId); @@ -124,11 +192,31 @@ public boolean doRemoveDurable() { } catch (Throwable t) { log.error("Failed to remove NoticeBroadcaster, noticeId: " + this.noticeId + ", durableId: " + this.durableId + ". Cause by:", t); +======== + if (!(flag = this.noticeDeliver.remove(this.durableId))) { + log.error("Removing NoticeBroadcaster was not complete, noticeId: {}, durableId: {}", + this.noticeId, this.durableId); + } + } catch (Throwable t) { + log.error("Failed to remove NoticeBroadcaster, noticeId: {}, durableId: {}. 
Caused by:", + this.noticeId, this.durableId, t); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java } return flag; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java +======== + public void setDurableId(String durableId) { + if (HgAssert.isInvalid(durableId)) { + log.warn("Set an invalid durable id to the NoticeBroadcaster."); + } + + this.durableId = durableId; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java public long getNoticeId() { return noticeId; } @@ -145,6 +233,7 @@ public String getDurableId() { return durableId; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java public void setDurableId(String durableId) { if (HgAssert.isInvalid(durableId)) { @@ -154,6 +243,8 @@ public void setDurableId(String durableId) { this.durableId = durableId; } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java public long getTimestamp() { return timestamp; } @@ -162,6 +253,7 @@ public void setTimestamp(long timestamp) { this.timestamp = timestamp; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java @Override public String toString() { return "NoticeBroadcaster{" + @@ -171,5 +263,20 @@ public String toString() { ", counter=" + counter + ", timestamp=" + timestamp + '}'; +======== + public String getNoticeString() { + return this.noticeDeliver.toNoticeString(); + } + + @Override + public String toString() { + return "NoticeBroadcaster{" + + "noticeId=" + noticeId + + ", durableId='" + durableId + '\'' + + ", state=" + state + + ", counter=" + counter + + ", timestamp=" + timestamp + + '}'; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/notice/NoticeBroadcaster.java } } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java index a2287cb83e..b9b5dc1c72 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -23,6 +24,11 @@ import org.apache.hugegraph.pd.common.PDException; +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.google.protobuf.InvalidProtocolBufferException; @@ -41,6 +47,13 @@ public class API { public static String QUOTATION = "\""; public static String COMMA = ","; public static String COLON = ": "; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java +======== + public static final String VERSION = "4.0.0"; + public static final String PD = "PD"; + public static final String STORE = "STORE"; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java public String toJSON(List values, String key) { @@ -127,6 +140,7 @@ public String toJSON(PDException exception) { return builder; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java public String toJSON(Exception exception) { String builder = "{" + QUOTATION + STATUS_KEY + QUOTATION + 
COLON + "-1" + @@ -134,6 +148,15 @@ public String toJSON(Exception exception) { QUOTATION + ERROR_KEY + QUOTATION + COLON + QUOTATION + exception.getMessage() + QUOTATION + "}"; +======== + public String toJSON(Throwable exception) { + StringBuilder builder = new StringBuilder(); + builder.append("{") + .append(QUOTATION).append(STATUS_KEY).append(QUOTATION).append(COLON).append("-1").append(COMMA) + .append(QUOTATION).append(ERROR_KEY).append(QUOTATION).append(COLON) + .append(QUOTATION).append(exception.getMessage()).append(QUOTATION); + builder.append("}"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/API.java return builder; } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java index 61f3c5a2c6..aabaeaefe9 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -25,11 +26,24 @@ import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.grpc.Pdpb; +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +======== +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; @@ -37,8 +51,19 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java import lombok.Data; import lombok.extern.slf4j.Slf4j; +======== +import java.lang.management.ManagementFactory; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; @RestController @Slf4j @@ -56,7 
+81,7 @@ public BriefStatistics index() throws PDException, ExecutionException, Interrupt BriefStatistics statistics = new BriefStatistics(); statistics.leader = RaftEngine.getInstance().getLeaderGrpcAddress(); - statistics.state = pdService.getStoreNodeService().getClusterStats().getState().toString(); + statistics.state = getClusterState(); statistics.storeSize = pdService.getStoreNodeService().getActiveStores().size(); statistics.graphSize = pdService.getPartitionService().getGraphs().size(); statistics.partitionSize = pdService.getStoreNodeService().getShardGroups().size(); @@ -64,16 +89,36 @@ public BriefStatistics index() throws PDException, ExecutionException, Interrupt } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java +======== + @Data + class BriefStatistics { + Map state; + String leader; + int memberSize; + int storeSize; + int graphSize; + int partitionSize; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java @GetMapping(value = "/v1/cluster", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public RestApiResponse cluster() throws InterruptedException, ExecutionException { Statistics statistics = new Statistics(); try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java statistics.state = String.valueOf(pdService.getStoreNodeService().getClusterStats().getState()); String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); CallStreamObserverWrap response = new CallStreamObserverWrap<>(); +======== + statistics.states = getClusterState(); + statistics.state = statistics.getStates().get(DEFAULT_STORE_GROUP_ID); + String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java 
pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); List pdList = new ArrayList<>(); for (Metapb.Member member : response.get().get(0).getMembersList()) { @@ -127,13 +172,26 @@ public RestApiResponse cluster() throws InterruptedException, ExecutionException } } statistics.dataState = dataState.name(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java return new RestApiResponse(statistics, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); } catch (PDException e) { +======== + return new RestApiResponse(statistics, ErrorType.OK, ErrorType.OK.name()); + } catch (PDException e){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java log.error("PD Exception: ", e); return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); } } + private Map getClusterState() { + Map state = new HashMap<>(); + for (var entry : pdService.getStoreNodeService().getAllClusterStats().entrySet()) { + state.put(entry.getKey(), String.valueOf(entry.getValue())); + } + return state; + } + @Data class BriefStatistics { @@ -203,11 +261,23 @@ public Member() { class Statistics { /** +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java * Cluster status, default of the cluster */ String state; /** * Data status +======== + * 集群状态, 默认集群的 + */ + String state; + /** + * 集群状态 + */ + Map states; + /** + * 数据状态 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/IndexAPI.java */ String dataState; /** diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java index c6542c47ae..8d9634ebed 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +++ 
b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -16,6 +17,11 @@ */ package org.apache.hugegraph.pd.rest; +======== +package org.apache.hugegraph.pd.rest; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java import java.lang.management.ManagementFactory; import java.util.ArrayList; @@ -28,6 +34,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java import javax.servlet.http.HttpServletRequest; @@ -37,6 +44,12 @@ import org.apache.hugegraph.pd.model.PeerRestRequest; import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.raft.RaftEngine; +======== +import java.util.stream.Collectors; + +import javax.servlet.http.HttpServletRequest; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java import org.apache.hugegraph.pd.service.PDService; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; @@ -47,6 +60,19 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java +======== +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import 
org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.model.PeerRestRequest; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java import io.grpc.stub.CallStreamObserver; import io.grpc.stub.StreamObserver; import lombok.Data; @@ -63,15 +89,16 @@ public class MemberAPI extends API { @GetMapping(value = "/members", produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody - public RestApiResponse getMembers() throws InterruptedException, ExecutionException { + public RestApiResponse getMembers() throws Exception { String leaderGrpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(); - CallStreamObserverWrap response = new CallStreamObserverWrap<>(); - pdService.getMembers(Pdpb.GetMembersRequest.newBuilder().build(), response); + CallStreamObserverWrap response = new CallStreamObserverWrap<>(); + pdService.getMembersAndClusterState(Pdpb.GetMembersRequest.newBuilder().build(), response); List members = new ArrayList<>(); Member leader = null; Map stateCountMap = new HashMap<>(); - for (Metapb.Member member : response.get().get(0).getMembersList()) { + Pdpb.MembersAndClusterState membersAndClusterState = response.get().get(0); + for (Metapb.Member member : membersAndClusterState.getMembersList()) { String stateKey = member.getState().name(); stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); Member member1 = new Member(member); @@ -81,16 +108,21 @@ public RestApiResponse getMembers() throws InterruptedException, ExecutionExcept member1.role = member.getRole().name(); members.add(member1); } - String state = pdService.getStoreNodeService().getClusterStats().getState().toString(); + + var stateMap = membersAndClusterState.getStateList().stream() + 
.collect(Collectors.toMap(Metapb.GroupClusterState::getStoreGroup, + state -> String.valueOf(state.getState()))); + HashMap resultMap = new HashMap<>(); - resultMap.put("state", state); + resultMap.put("state", stateMap.get(DEFAULT_STORE_GROUP_ID)); + resultMap.put("states", stateMap); resultMap.put("pdList", members); resultMap.put("pdLeader", leader); resultMap.put("numOfService", members.size()); resultMap.put("numOfNormalService", stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); resultMap.put("stateCountMap", stateCountMap); - return new RestApiResponse(resultMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + return new RestApiResponse(resultMap, ErrorType.OK, ErrorType.OK.name()); } @PostMapping(value = "/members/change", consumes = MediaType.APPLICATION_JSON_VALUE, @@ -98,6 +130,7 @@ public RestApiResponse getMembers() throws InterruptedException, ExecutionExcept @ResponseBody public String changePeerList(@RequestBody PeerRestRequest body, HttpServletRequest request) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java Pdpb.ChangePeerListRequest rpcRequest = Pdpb.ChangePeerListRequest.newBuilder().setPeerList( body.getPeerList()).build(); @@ -118,6 +151,26 @@ public void onError(Throwable t) { t.getMessage()).build()).build(); latch.countDown(); } +======== + ClusterOp.ChangePeerListRequest rpcRequest = ClusterOp.ChangePeerListRequest.newBuilder().setPeerList( + body.getPeerList()).build(); + CountDownLatch latch = new CountDownLatch(1); + final ResponseHeader[] responseHeader = {null}; + StreamObserver observer = new StreamObserver<>() { + @Override + public void onNext(ClusterOp.ChangePeerListResponse value) { + responseHeader[0] = value.getHeader(); + } + + @Override + public void onError(Throwable t) { + responseHeader[0] = ResponseHeader.newBuilder().setError( + Errors.newBuilder().setType( + ErrorType.UNKNOWN).setMessage( + t.getMessage()).build()).build(); + latch.countDown(); + } 
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java @Override public void onCompleted() { @@ -216,9 +269,15 @@ class Member { String dataPath; String role; String replicateState; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java String serviceName; // Service name, custom attributes String serviceVersion; // Static definitions long startTimeStamp; // Startup time: temporarily takes the startup time of the process +======== + String serviceName; //服务名称,自定义属性 + String serviceVersion; //静态定义 + long startTimeStamp; //启动时间,暂时取进程的启动时间 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java public Member(Metapb.Member member) { if (member != null) { @@ -228,7 +287,11 @@ public Member(Metapb.Member member) { state = String.valueOf(member.getState()); dataPath = member.getDataPath(); serviceName = grpcUrl + "-PD"; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java serviceVersion = VERSION; +======== + serviceVersion = API.VERSION; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/MemberAPI.java startTimeStamp = ManagementFactory.getRuntimeMXBean().getStartTime(); replicateState = member.getReplicatorState(); } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java index 5fd10cf790..9b8b87e2a6 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * 
contributor license agreements. See the NOTICE file distributed with @@ -25,6 +26,21 @@ import java.util.Map; import java.util.concurrent.ExecutionException; +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.util.DateUtil; +import com.google.protobuf.util.JsonFormat; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java import org.apache.commons.lang.time.DateFormatUtils; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; @@ -134,11 +150,17 @@ public RestApiResponse getHighLevelPartitions() { // Assign values to the address and partition information of the replica shard.address = storesMap.get(shard.storeId).getAddress(); shard.partitionId = partition.getId(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java } if ((partitionStats != null) && (partitionStats.getLeader() != null)) { long storeId = partitionStats.getLeader().getStoreId(); resultPartition.leaderAddress = storesMap.get(storeId).getAddress(); +======== + if (shard.getRole().equalsIgnoreCase(Metapb.ShardRole.Leader.name())){ + resultPartition.leaderAddress = shard.address; + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/PartitionAPI.java } resultPartitionsMap.put(partition.getId(), resultPartition); } @@ -163,7 +185,7 @@ public RestApiResponse getHighLevelPartitions() { postfixLength); graphsList.add(tmpGraph); } - graphsList.sort((o1, o2) -> 
o1.graphName.compareTo(o2.graphName)); + graphsList.sort(Comparator.comparing(o -> o.graphName)); currentPartition.graphs = graphsList; } List resultPartitionList = new ArrayList<>(); @@ -176,7 +198,7 @@ public RestApiResponse getHighLevelPartitions() { } HashMap dataMap = new HashMap<>(); dataMap.put("partitions", resultPartitionList); - return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); } @GetMapping(value = "/partitions", produces = MediaType.APPLICATION_JSON_VALUE) @@ -262,7 +284,7 @@ public RestApiResponse getPartitions() { Comparator.comparing(Partition::getGraphName).thenComparing(Partition::getId)); HashMap dataMap = new HashMap<>(); dataMap.put("partitions", partitions); - return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); } catch (PDException e) { log.error("query metric data error", e); return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); @@ -319,14 +341,27 @@ public String getPartitionLog(@RequestBody TimeRangeRequest request) { dateEnd.getTime()); if (changedRecords != null) { JsonFormat.TypeRegistry registry = JsonFormat.TypeRegistry - .newBuilder().add(Pdpb.SplitDataRequest.getDescriptor()).build(); + .newBuilder().add(ClusterOp.SplitDataRequest.getDescriptor()).build(); return toJSON(changedRecords, registry); } else { - return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + return toJSON(new PDException(ErrorType.NOT_FOUND_VALUE, "error")); + } + } catch (PDException e) { + return toJSON(e); + } + } + + + @GetMapping(value = "/resetPartitionState", produces = MediaType.APPLICATION_JSON_VALUE) + public String resetPartitionState() { + try { + for (Metapb.Partition partition : pdRestService.getPartitions("")) { + pdRestService.resetPartitionState(partition); } } catch (PDException e) { return toJSON(e); } + 
return "OK"; } @GetMapping(value = "/", produces = MediaType.APPLICATION_JSON_VALUE) @@ -405,6 +440,7 @@ class HighLevelPartition { if (partitionStats != null) { raftTerm = partitionStats.getLeaderTerm(); } + Metapb.ShardState tmpShardState = Metapb.ShardState.SState_Normal; if (partitionStats != null) { shards = new ArrayList<>(); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java index 482eac40a0..5711968262 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -25,6 +26,26 @@ import javax.servlet.http.HttpServletRequest; +======== +package org.apache.hugegraph.pd.rest; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.common.PDRuntimeException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.model.RegistryQueryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.apache.hugegraph.pd.rest.MemberAPI.CallStreamObserverWrap; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import 
lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java import org.apache.commons.lang3.StringUtils; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.common.PDRuntimeException; @@ -75,11 +96,11 @@ public RegistryRestResponse register(@RequestBody RegistryRestRequest body, registryResponse = pdRestService.register(info); } catch (PDException e) { registryResponse = new RegistryRestResponse(); - registryResponse.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + registryResponse.setErrorType(ErrorType.UNRECOGNIZED); registryResponse.setMessage(e.getMessage()); } catch (PDRuntimeException e) { registryResponse = new RegistryRestResponse(); - registryResponse.setErrorType(Pdpb.ErrorType.LICENSE_VERIFY_ERROR); + registryResponse.setErrorType(ErrorType.LICENSE_VERIFY_ERROR); registryResponse.setMessage(e.getMessage()); } return registryResponse; @@ -101,11 +122,11 @@ public RegistryRestResponse getInfo(@RequestBody RegistryQueryRestRequest body, body.getVersion()) .build(); ArrayList registryResponse = pdRestService.getNodeInfo(query); - response.setErrorType(Pdpb.ErrorType.OK); + response.setErrorType(ErrorType.OK); response.setData(registryResponse); } catch (Exception e) { log.warn(e.getMessage()); - response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setErrorType(ErrorType.UNRECOGNIZED); response.setMessage(e.getMessage()); } return response; @@ -135,7 +156,7 @@ public RegistryRestResponse allInfo(HttpServletRequest request) { restRequest.setId(String.valueOf(store.getId())); storeMembers.add(restRequest); } - response.setErrorType(Pdpb.ErrorType.OK); + response.setErrorType(ErrorType.OK); HashMap result = new HashMap<>(); result.put("other", registryResponse); result.put(PD, pdMembers); @@ -143,7 +164,7 @@ public RegistryRestResponse allInfo(HttpServletRequest request) { response.setData(result); } catch (Exception e) { log.warn(e.getMessage()); 
- response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setErrorType(ErrorType.UNRECOGNIZED); response.setMessage(e.getMessage()); } return response; @@ -170,13 +191,19 @@ private LinkedList getMembers() throws Exception { public RegistryRestResponse getLicenseInfo(HttpServletRequest request) { RegistryRestResponse response = new RegistryRestResponse(); try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java response.setErrorType(Pdpb.ErrorType.OK); // TODO: uncomment later //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); //response.setData(licenseVerifierService.getContext()); +======== + response.setErrorType(ErrorType.OK); + LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + response.setData(licenseVerifierService.getContext()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java } catch (Exception e) { log.warn(e.getMessage()); - response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setErrorType(ErrorType.UNRECOGNIZED); response.setMessage(e.getMessage()); } return response; @@ -188,13 +215,19 @@ public RegistryRestResponse getLicenseInfo(HttpServletRequest request) { public RegistryRestResponse getLicenseMachineInfo(HttpServletRequest request) { RegistryRestResponse response = new RegistryRestResponse(); try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java response.setErrorType(Pdpb.ErrorType.OK); // TODO: uncomment later //LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); //response.setData(licenseVerifierService.getIpAndMac()); +======== + response.setErrorType(ErrorType.OK); + LicenseVerifierService licenseVerifierService = pdService.getLicenseVerifierService(); + response.setData(licenseVerifierService.getIpAndMac()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/RegistryAPI.java } catch (Exception e) { log.warn(e.getMessage()); - response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + response.setErrorType(ErrorType.UNRECOGNIZED); response.setMessage(e.getMessage()); } return response; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java index 53637806db..5c531142b3 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -14,9 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +======== +package org.apache.hugegraph.pd.rest; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java package org.apache.hugegraph.pd.rest; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -27,6 +32,17 @@ import org.apache.hugegraph.pd.model.RestApiResponse; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.service.PDService; +======== +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.service.PDRestService; +import org.apache.hugegraph.pd.service.PDService; +import lombok.Data; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/ShardAPI.java import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; @@ -98,7 +114,7 @@ public RestApiResponse getShards() { } HashMap dataMap = new HashMap<>(); dataMap.put("shards", resultShardList); - return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); } catch (PDException e) { log.error("PDException: ", e); return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java index 10c783f7db..304858876f 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java +++ 
b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -18,12 +19,20 @@ package org.apache.hugegraph.pd.rest; import java.util.ArrayList; +======== +package org.apache.hugegraph.pd.rest; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Comparator; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.grpc.Pdpb; @@ -32,6 +41,10 @@ import org.apache.hugegraph.pd.model.TimeRangeRequest; import org.apache.hugegraph.pd.service.PDRestService; import org.apache.hugegraph.pd.util.DateUtil; +======== +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.service.PDRestService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.DeleteMapping; @@ -43,6 +56,16 @@ import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.RestController; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java +======== +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import 
org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.model.RestApiResponse; +import org.apache.hugegraph.pd.model.StoreRestRequest; +import org.apache.hugegraph.pd.model.TimeRangeRequest; +import org.apache.hugegraph.pd.util.DateUtil; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java import com.google.protobuf.util.JsonFormat; import lombok.Data; @@ -68,13 +91,13 @@ public RestApiResponse getStores() { stateCountMap.put(stateKey, stateCountMap.getOrDefault(stateKey, 0) + 1); storeStatsList.add(new StoreStatistics(store)); } - storeStatsList.sort((o1, o2) -> o1.address.compareTo(o2.address)); + storeStatsList.sort(Comparator.comparing(o -> o.address)); dataMap.put("stores", storeStatsList); dataMap.put("numOfService", storeStatsList.size()); dataMap.put("numOfNormalService", stateCountMap.getOrDefault(Metapb.StoreState.Up.name(), 0)); dataMap.put("stateCountMap", stateCountMap); - return new RestApiResponse(dataMap, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); + return new RestApiResponse(dataMap, ErrorType.OK, ErrorType.OK.name()); } catch (PDException e) { log.error("PDException", e); return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); @@ -95,7 +118,7 @@ public String setStore(@PathVariable long storeId, @RequestBody StoreRestRequest Metapb.Store newStore = pdRestService.updateStore(builder.build()); return toJSON(newStore, "store"); } else { - return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + return toJSON(new PDException(ErrorType.NOT_FOUND_VALUE, "error")); } } catch (PDException e) { return toJSON(e); @@ -174,7 +197,7 @@ public String getStoreLog(@RequestBody TimeRangeRequest request) { .newBuilder().add(Metapb.Store.getDescriptor()).build(); return toJSON(changedStore, registry); } else { - return toJSON(new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, "error")); + return toJSON(new PDException(ErrorType.NOT_FOUND_VALUE, "error")); } } catch 
(PDException e) { return toJSON(e); @@ -193,11 +216,17 @@ public RestApiResponse getStore(@PathVariable long storeId) { } if (store != null) { StoreStatistics resultStoreStats = resultStoreStats = new StoreStatistics(store); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java return new RestApiResponse(resultStoreStats, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); } else { return new RestApiResponse(null, Pdpb.ErrorType.STORE_ID_NOT_EXIST, Pdpb.ErrorType.STORE_ID_NOT_EXIST.name()); +======== + return new RestApiResponse(resultStoreStats, ErrorType.OK, ErrorType.OK.name()); + } else { + return new RestApiResponse(null, ErrorType.STORE_ID_NOT_EXIST, ErrorType.STORE_ID_NOT_EXIST.name()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java } } @@ -218,8 +247,13 @@ public String getStoresAndStats() { @ResponseBody public RestApiResponse getStoreMonitorData(@PathVariable long storeId) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java List> result = pdRestService.getMonitorData(storeId); return new RestApiResponse(result, Pdpb.ErrorType.OK, Pdpb.ErrorType.OK.name()); +======== + List> result = pdRestService.getMonitorData(storeId); + return new RestApiResponse(result, ErrorType.OK, ErrorType.OK.name()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java } catch (PDException e) { return new RestApiResponse(null, e.getErrorCode(), e.getMessage()); } @@ -235,6 +269,12 @@ public String getStoreMonitorDataText(@PathVariable long storeId) { } } + @GetMapping(value = "/shardGroupsCache", produces = MediaType.APPLICATION_JSON_VALUE) + @ResponseBody + public String getShardGroupsCache() { + return toJSON(new ArrayList<>(pdRestService.getShardGroupCache().values()), "shardGroups"); + } + @Data class Partition { @@ -260,9 +300,14 @@ class Partition { @Data class 
StoreStatistics { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java // store statistics long storeId; +======== + // store的统计信息 + String storeId; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java String address; String raftAddress; String version; @@ -278,7 +323,12 @@ class StoreStatistics { int partitionCount; int graphSize; long keyCount; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java long leaderCount; // shard role = 'Leader' The number of partitions +======== + int storeGroupId; + long leaderCount; // shard role = 'Leader'的分区数量 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/StoreAPI.java String serviceName; String serviceVersion; long serviceCreatedTimeStamp; // The time when the service was created @@ -286,7 +336,7 @@ class StoreStatistics { StoreStatistics(Metapb.Store store) { if (store != null) { - storeId = store.getId(); + storeId = String.valueOf(store.getId()); address = store.getAddress(); raftAddress = store.getRaftAddress(); state = String.valueOf(store.getState()); @@ -320,6 +370,12 @@ class StoreStatistics { partitionCount = store.getStats().getPartitionCount(); serviceName = address + "-store"; serviceVersion = store.getVersion(); + try { + storeGroupId = pdRestService.getStoreGroupId(store.getId()); + } catch (PDException e) { + storeGroupId = -1; + log.error("get store group id failed,", e); + } List graphStatsList = store.getStats().getGraphStatsList(); // Save the partition information List partitionStatsList = new ArrayList<>(); @@ -357,4 +413,8 @@ class StoreStatistics { } } + @GetMapping(value = "/health", produces = MediaType.TEXT_PLAIN_VALUE) + public Serializable checkHealthy() { + return ""; + } } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java 
b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java index a1876141b9..2820b8648f 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -20,10 +21,18 @@ import java.util.List; import java.util.Map; +======== +package org.apache.hugegraph.pd.rest; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java import org.apache.hugegraph.pd.common.KVPair; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; import org.apache.hugegraph.pd.service.PDRestService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java +======== +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/rest/TaskAPI.java import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java index 088403fb5a..05596bd3b7 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java /* * Licensed to the Apache 
Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -17,6 +18,10 @@ package org.apache.hugegraph.pd.service; +======== +package org.apache.hugegraph.pd.service; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java import java.util.List; import java.util.Map; import java.util.Random; @@ -27,6 +32,12 @@ import javax.annotation.PostConstruct; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java import org.apache.hugegraph.pd.KvService; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.config.PDConfig; @@ -46,12 +57,18 @@ import org.apache.hugegraph.pd.grpc.kv.WatchState; import org.apache.hugegraph.pd.grpc.kv.WatchType; import org.apache.hugegraph.pd.raft.RaftEngine; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java import org.apache.hugegraph.pd.raft.RaftStateListener; import org.apache.hugegraph.pd.watch.KvWatchSubject; import org.lognet.springboot.grpc.GRpcService; import org.springframework.beans.factory.annotation.Autowired; import io.grpc.ManagedChannel; +======== +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; +import org.apache.hugegraph.pd.watch.KvWatchSubject; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; @@ -59,9 +76,14 @@ * The core implementation class of KV storage */ @Slf4j +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java @GRpcService public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements RaftStateListener, ServiceGrpc { +======== +@GRpcService(interceptors = {GrpcAuthentication.class}) +public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implements ServiceGrpc { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java private final ManagedChannel channel = null; KvService kvService; @@ -69,6 +91,10 @@ public class KvServiceGrpcImpl extends KvServiceGrpc.KvServiceImplBase implement String msg = "node is not leader,it is necessary to redirect to the leader on the client"; @Autowired private PDConfig pdConfig; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== + KvService kvService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java private KvWatchSubject subjects; private ScheduledExecutorService executor; @@ -83,7 +109,11 @@ public void init() { if (isLeader()) { subjects.keepClientAlive(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java }, 0, KvWatchSubject.WATCH_TTL / 2, TimeUnit.MILLISECONDS); +======== + }, 0, KvWatchSubject.WATCH_TTL * 1 / 3, TimeUnit.MILLISECONDS); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java } /** @@ -285,6 +315,10 @@ public void watch(WatchRequest request, StreamObserver responseOb if (!isLeader()) { try { responseObserver.onError(new PDException(-1, msg)); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== + return; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java } catch (IllegalStateException ie) { } catch (Exception e1) { @@ -312,6 +346,10 @@ public void watchPrefix(WatchRequest request, StreamObserver resp if (!isLeader()) { try { responseObserver.onError(new PDException(-1, msg)); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java +======== + return; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/KvServiceGrpcImpl.java } catch (IllegalStateException ie) { } catch (Exception e1) { diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java index 9df8381112..a117a3d351 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,12 +17,52 @@ */ package org.apache.hugegraph.pd.service; +======== +package org.apache.hugegraph.pd.service; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import javax.annotation.PreDestroy; + +import org.springframework.beans.factory.InitializingBean; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import com.alipay.sofa.jraft.Node; +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.entity.PeerId; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.boot.ShutdownHook; +import org.apache.hugegraph.pd.common.HgAssert; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfo; +import org.apache.hugegraph.pd.grpc.discovery.NodeInfos; +import org.apache.hugegraph.pd.grpc.discovery.Query; +import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo; +import org.apache.hugegraph.pd.meta.MetadataFactory; +import org.apache.hugegraph.pd.model.RegistryRestRequest; +import org.apache.hugegraph.pd.model.RegistryRestResponse; +import org.apache.hugegraph.pd.raft.RaftEngine; + +import 
io.grpc.stub.StreamObserver; +import lombok.extern.slf4j.Slf4j; import org.apache.hugegraph.pd.ConfigService; import org.apache.hugegraph.pd.LogService; @@ -62,6 +103,14 @@ public class PDRestService implements InitializingBean { private ConfigService configService; private LogService logService; private StoreMonitorDataService storeMonitorDataService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java +======== + private static final int WAIT_TIMEOUT = 45; + @Autowired + PDService pdService; + @Autowired + DiscoveryService discoveryService; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDRestService.java /** * initialize @@ -125,13 +174,17 @@ public Metapb.Graph getGraph(String graphName) throws PDException { } public Metapb.Graph updateGraph(Metapb.Graph graph) throws PDException { - return partitionService.updateGraph(graph); + return partitionService.updateGraphName(graph); } public List getPartitions(String graphName) { return partitionService.getPartitions(graphName); } + public Map getShardGroupCache() { + return partitionService.getShardGroupCache(); + } + public List patrolStores() throws PDException { return monitorService.patrolStores(); } @@ -150,11 +203,11 @@ public List getPartitionStatus(String graphName) throws P } public Map> balancePartitions() throws PDException { - return monitorService.balancePartitionShard(); + return monitorService.balancePartitionShard(DEFAULT_STORE_GROUP_ID); } public List splitPartitions() throws PDException { - return monitorService.autoSplitPartition(); + return monitorService.autoSplitPartition(DEFAULT_STORE_GROUP_ID); } public List getStoreStats(boolean isActive) throws PDException { @@ -192,12 +245,12 @@ public void onCompleted() { } }; this.discoveryService.register(nodeInfo, observer); - latch.await(); - Pdpb.Error error = info[0].getHeader().getError(); + latch.await(WAIT_TIMEOUT, TimeUnit.SECONDS); + 
Errors error = info[0].getHeader().getError(); response.setErrorType(error.getType()); response.setMessage(error.getMessage()); - } catch (InterruptedException e) { - response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + } catch (Exception e) { + response.setErrorType(ErrorType.UNRECOGNIZED); response.setMessage(e.getMessage()); } return response; @@ -227,7 +280,7 @@ public void onCompleted() { } }; this.discoveryService.getNodes(request, observer); - latch.await(); + latch.await(WAIT_TIMEOUT, TimeUnit.SECONDS); List infoList = info[0].getInfoList(); registryRestRequests = new ArrayList(infoList.size()); for (int i = 0; i < infoList.size(); i++) { @@ -242,8 +295,8 @@ public void onCompleted() { registryRestRequest.setLabels(labels); registryRestRequests.add(registryRestRequest); } - } catch (InterruptedException e) { - response.setErrorType(Pdpb.ErrorType.UNRECOGNIZED); + } catch (Exception e) { + response.setErrorType(ErrorType.UNRECOGNIZED); response.setMessage(e.getMessage()); } return registryRestRequests; @@ -268,4 +321,46 @@ public void dbCompaction() throws PDException { public List getShardList(int partitionId) throws PDException { return storeNodeService.getShardList(partitionId); } + + public void resetPartitionState(Metapb.Partition partition) throws PDException { + partitionService.updatePartitionState(partition.getGraphName(), partition.getId(), + Metapb.PartitionState.PState_Normal); + } + + public int getStoreGroupId(long storeId) throws PDException { + return storeNodeService.getStoreGroupByStore(storeId); + } + + @PreDestroy + public void shutdown(){ + try { + log.info("shutdown RaftEngine...."); + RaftEngine engine = RaftEngine.getInstance(); + int count = 0; + while (count++ < 3) { + Node raftNode = engine.getRaftNode(); + if (raftNode.isLeader(true)) { + Status status = raftNode.transferLeadershipTo(PeerId.ANY_PEER); + if (status.isOk()) { + raftNode.disableVote(); + break; + } else { + log.warn("transfer leader with warning: {}", status); + 
synchronized (ShutdownHook.class) { + ShutdownHook.class.wait(1000); + } + } + } else { + break; + } + } + engine.shutDown(); + log.info("RaftEngine shutdown and start to shutdown db...."); + MetadataFactory.closeStore(); + log.info("db shutdown"); + log.info("all resources have been closed"); + } catch (Exception e) { + log.warn("shutdown with error:", e); + } + } } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java index 25b3f74295..a73dcd0ed6 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -81,6 +82,80 @@ import com.alipay.sofa.jraft.Status; import com.alipay.sofa.jraft.conf.Configuration; import com.alipay.sofa.jraft.entity.PeerId; +======== +package org.apache.hugegraph.pd.service; + +import static org.apache.hugegraph.pd.grpc.common.ErrorType.GRAPH_NOT_EXISTS; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.PARTITION_NOT_EXISTS; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.STORE_GROUP_NOT_EXISTS; +import static org.apache.hugegraph.pd.grpc.common.ErrorType.TASK_NOT_EXISTS; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import javax.annotation.PostConstruct; + +import org.apache.commons.io.FileUtils; +import org.lognet.springboot.grpc.GRpcService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.context.annotation.DependsOn; +import org.springframework.util.CollectionUtils; + +import com.alipay.sofa.jraft.Status; +import com.alipay.sofa.jraft.conf.Configuration; +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.LogService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.ClusterOp; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.Metapb.GraphStats; +import org.apache.hugegraph.pd.grpc.PDGrpc; +import org.apache.hugegraph.pd.grpc.Pdpb; +import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse; 
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.GetLeaderGrpcAddressResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.GraphStatsResponse; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseRequest; +import org.apache.hugegraph.pd.grpc.Pdpb.PutLicenseResponse; +import org.apache.hugegraph.pd.grpc.StoreGroup; +import org.apache.hugegraph.pd.grpc.common.ErrorType; +import org.apache.hugegraph.pd.grpc.common.Errors; +import org.apache.hugegraph.pd.grpc.common.NoArg; +import org.apache.hugegraph.pd.grpc.common.ResponseHeader; +import org.apache.hugegraph.pd.grpc.common.VoidResponse; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatResponse; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.StoreNodeEventType; +import org.apache.hugegraph.pd.license.LicenseVerifierService; +import org.apache.hugegraph.pd.pulse.PDPulseSubjects; +import org.apache.hugegraph.pd.pulse.impl.PartitionInstructionListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.PartitionStatusListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.PulseListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.ShardGroupStatusListenerImpl; +import org.apache.hugegraph.pd.pulse.impl.StoreStatusListenerImpl; +import org.apache.hugegraph.pd.raft.PeerUtil; +import org.apache.hugegraph.pd.raft.RaftEngine; +import org.apache.hugegraph.pd.service.interceptor.GrpcAuthentication; +import org.apache.hugegraph.pd.util.grpc.StreamObserverUtil; + +import io.grpc.stub.StreamObserver; +import lombok.Getter; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java import io.grpc.ManagedChannel; import io.grpc.stub.StreamObserver; @@ -88,6 +163,7 @@ // TODO: uncomment later - remove license verifier service now @Slf4j +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @GRpcService public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftStateListener { @@ -98,12 +174,29 @@ public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc, RaftSta private final Map channelMap = new ConcurrentHashMap<>(); @Autowired private PDConfig pdConfig; +======== +@GRpcService(interceptors = {GrpcAuthentication.class}) +@DependsOn("pdPulseService") +public class PDService extends PDGrpc.PDImplBase implements ServiceGrpc { + + public static final String TASK_ID_KEY = "task_id"; + private static final String USER_TASK_ID_KEY = "user_task_key"; + @Autowired + private PDConfig pdConfig; + @Getter +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java private StoreNodeService storeNodeService; + @Getter private PartitionService partitionService; + @Getter private TaskScheduleService taskService; + @Getter private IdService idService; + @Getter private ConfigService configService; + @Getter private LogService logService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java //private LicenseVerifierService licenseVerifierService; private StoreMonitorDataService storeMonitorDataService; private ManagedChannel channel; @@ -148,25 +241,41 @@ public LogService getLogService() { //public LicenseVerifierService getLicenseVerifierService() { // return licenseVerifierService; //} +======== + @Getter + private LicenseVerifierService licenseVerifierService; + @Getter + private StoreMonitorDataService storeMonitorDataService; + private ResponseHeader okHeader = getResponseHeader(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java /** * initialize */ @PostConstruct public void init() throws PDException { - log.info("PDService init………… {}", pdConfig); + 
log.info("PDService init……{}", pdConfig); configService = new ConfigService(pdConfig); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java RaftEngine.getInstance().addStateListener(this); RaftEngine.getInstance().addStateListener(configService); RaftEngine.getInstance().init(pdConfig.getRaft()); //pdConfig = configService.loadConfig(); onLeaderChanged +======== + RaftEngine engine = RaftEngine.getInstance(); + engine.addStateListener(this); + engine.addStateListener(configService); + engine.init(pdConfig.getRaft()); + // pdConfig = configService.loadConfig(); onLeaderChanged中加载 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java storeNodeService = new StoreNodeService(pdConfig); - partitionService = new PartitionService(pdConfig, storeNodeService); - taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); + partitionService = new PartitionService(pdConfig, storeNodeService, configService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService, configService); idService = new IdService(pdConfig); logService = new LogService(pdConfig); storeMonitorDataService = new StoreMonitorDataService(pdConfig); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java //if (licenseVerifierService == null) { // licenseVerifierService = new LicenseVerifierService(pdConfig); //} @@ -331,14 +440,48 @@ public void onStoreRaftChanged(Metapb.Store store) { store.getId()); } }); +======== + if (licenseVerifierService == null) { + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + engine.addStateListener(partitionService); + pdConfig.setIdService(idService); + // 接收心跳消息 + PDPulseSubjects.listenPartitionHeartbeat(new PulseListenerImpl(this)); + // 处理心跳Listener异常,返回0,不中断其他Listener。 +// PDPulseSubjects.setPartitionErrInterceptor( +// (req,e) -> { +// if (e 
instanceof PDException) { +// var pde = (PDException) e; +// if (pde.getErrorCode() == NOT_LEADER.getNumber()) { +// try { +// log.info("send change leader command to watch, due to ERROR-100", pde); +// PDPulseSubjects.notifyClient(PdInstructionResponse.newBuilder() +// .setInstructionType(PdInstructionType.CHANGE_TO_FOLLOWER) +// .setLeaderIp(engine.getLeaderGrpcAddress()) +// .build()); +// } catch (Exception ex) { +// log.error("send notice to observer failed, ", ex); +// } +// return 1; // Aborting other listeners. +// } +// } else { +// log.error("handleNotice error", e); +// } +// return 0; +// } +// ); + // 监听分区指令,并转发给Store + partitionService.addInstructionListener(new PartitionInstructionListenerImpl(this)); + // 监听分区状态改变消息,并转发给Client + partitionService.addStatusListener(new PartitionStatusListenerImpl()); + storeNodeService.addShardGroupStatusListener(new ShardGroupStatusListenerImpl()); + // 监听store状态改变消息,并转发给Client + storeNodeService.addStatusListener(new StoreStatusListenerImpl()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java storeNodeService.init(partitionService); partitionService.init(); taskService.init(); - // log.info("init ......."); - // licenseVerifierService.init(); - - // UpgradeService upgradeService = new UpgradeService(pdConfig); - // upgradeService.upgrade(); } /** @@ -360,7 +503,7 @@ public void registerStore(Pdpb.RegisterStoreRequest request, .setStoreId(store.getId()) .build(); } catch (PDException e) { - response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.RegisterStoreResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("registerStore exception: ", e); } observer.onNext(response); @@ -384,7 +527,7 @@ public void getStore(Pdpb.GetStoreRequest request, response = Pdpb.GetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); } catch (PDException e) { - response = 
Pdpb.GetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetStoreResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("{} getStore exception: {}", StreamObserverUtil.getRemoteIP(observer), e); } @@ -410,6 +553,7 @@ public void setStore(Pdpb.SetStoreRequest request, Long storeId = request.getStore().getId(); // In the Pending state, you can go online Metapb.Store lastStore = storeNodeService.getStore(request.getStore().getId()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (lastStore == null) { // storeId does not exist, an exception is thrown throw new PDException(Pdpb.ErrorType.STORE_ID_NOT_EXIST_VALUE, @@ -419,12 +563,23 @@ public void setStore(Pdpb.SetStoreRequest request, if (!Metapb.StoreState.Pending.equals(lastStore.getState())) { throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, "only stores in Pending state can be set to Up!"); +======== + if (lastStore == null){ + // storeId不存在,抛出异常 + throw new PDException(ErrorType.STORE_ID_NOT_EXIST_VALUE, + String.format("Store id %d does not exist!", storeId)); + } + if (Metapb.StoreState.Up.equals(state)){ + if (!Metapb.StoreState.Pending.equals(lastStore.getState())){ + throw new PDException(ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "only stores in Pending state can be set to Up!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } } if (state.equals(Metapb.StoreState.Offline)) { - Metapb.ClusterStats stats = storeNodeService.getClusterStats(); + Metapb.ClusterStats stats = storeNodeService.getClusterStats(storeId); if (stats.getState() != Metapb.ClusterState.Cluster_OK) { - Pdpb.ResponseHeader errorHeader = newErrorHeader(-1, + ResponseHeader errorHeader = getResponseHeader(-1, "can not offline node " + "when cluster state is not " + @@ -459,7 +614,7 @@ public void setStore(Pdpb.SetStoreRequest request, Metapb.Store 
store = Metapb.Store.newBuilder(request.getStore()) .setState(Metapb.StoreState.Pending).build(); storeNodeService.updateStore(store); - throw new PDException(Pdpb.ErrorType.LICENSE_ERROR_VALUE, + throw new PDException(ErrorType.LICENSE_ERROR_VALUE, "check license with error :" + e.getMessage() + ", and changed node state to 'Pending'"); @@ -471,6 +626,7 @@ public void setStore(Pdpb.SetStoreRequest request, if (state.equals(Metapb.StoreState.Tombstone)) { List activeStores = storeNodeService.getActiveStores(); if (lastStore.getState() == Metapb.StoreState.Up +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { throw new PDException(Pdpb.ErrorType.LESS_ACTIVE_STORE_VALUE, "The number of active stores is less then " + @@ -484,11 +640,29 @@ public void setStore(Pdpb.SetStoreRequest request, // If it is already in the offline state, no further processing will be made throw new PDException(Pdpb.ErrorType.Store_Tombstone_Doing_VALUE, "Downline is in progress, do not resubmit"); +======== + && activeStores.size() - 1 < pdConfig.getMinStoreCount()) { + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "The number of active stores is less then " + pdConfig.getMinStoreCount()); + } + if (!storeNodeService.checkStoreCanOffline(request.getStore())){ + throw new PDException(ErrorType.LESS_ACTIVE_STORE_VALUE, + "check activeStores or online shardsList size"); + } + if (lastStore.getState() == Metapb.StoreState.Exiting){ + // 如果已经是下线中的状态,则不作进一步处理 + throw new PDException(ErrorType.Store_Tombstone_Doing_VALUE, + "Downline is in progress, do not resubmit"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } Map resultMap = taskService.canAllPartitionsMovedOut(lastStore); if ((boolean) resultMap.get("flag")) { if (resultMap.get("current_store_is_online") != null +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java && (boolean) resultMap.get("current_store_is_online")) { +======== + && (boolean) resultMap.get("current_store_is_online")) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.info("updateStore removeActiveStores store {}", store.getId()); // Set the status of the online store to Offline and wait for the replica // to be migrated @@ -501,18 +675,25 @@ public void setStore(Pdpb.SetStoreRequest request, // If the store is offline, the replica is not migrated // Change the status to Tombstone } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } else { throw new PDException(Pdpb.ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, "the resources on other stores may be not enough to " + "store " + "the partitions of current store!"); +======== + }else{ + throw new PDException(ErrorType.UPDATE_STORE_STATE_ERROR_VALUE, + "the resources on other stores may be not enough to store " + + "the partitions of current store!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } } store = storeNodeService.updateStore(store); response = Pdpb.SetStoreResponse.newBuilder().setHeader(okHeader).setStore(store).build(); } catch (PDException e) { - response = Pdpb.SetStoreResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.SetStoreResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("setStore exception: ", e); } @@ -531,7 +712,12 @@ public void getAllStores(Pdpb.GetAllStoresRequest request, try { List stores = null; if (request.getExcludeOfflineStores()) { - stores = storeNodeService.getActiveStores(request.getGraphName()); + if (! 
request.getGraphName().isEmpty()) { + var graph = partitionService.getGraph(request.getGraphName()); + stores = storeNodeService.getActiveStoresByStoreGroup(graph.getStoreGroupId()); + } else { + stores = storeNodeService.getActiveStores(); + } } else { stores = storeNodeService.getStores(request.getGraphName()); } @@ -539,7 +725,7 @@ public void getAllStores(Pdpb.GetAllStoresRequest request, Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) .build(); } catch (PDException e) { - response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getAllStores exception: ", e); } observer.onNext(response); @@ -566,26 +752,43 @@ public void storeHeartbeat(Pdpb.StoreHeartbeatRequest request, } catch (PDException e) { log.error("save status failed, state:{}", stats); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java // remove system_metrics stats = Metapb.StoreStats.newBuilder() .mergeFrom(request.getStats()) .clearField(Metapb.StoreStats.getDescriptor().findFieldByName( "system_metrics")) .build(); +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } - Pdpb.StoreHeartbeatResponse response = null; + // remove system_metrics + stats = Metapb.StoreStats.newBuilder() + .mergeFrom(request.getStats()) + .clearSystemMetrics() + .build(); + + Pdpb.StoreHeartbeatResponse response; try { Metapb.ClusterStats clusterStats = storeNodeService.heartBeat(stats); - response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader) - .setClusterStats(clusterStats).build(); + Pdpb.StoreHeartbeatResponse.Builder builder = + Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(okHeader); + if (clusterStats != null) { + builder.setClusterStats(clusterStats); + } + response = builder.build(); } catch 
(PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("storeHeartbeat exception: ", e); } catch (Exception e2) { response = Pdpb.StoreHeartbeatResponse.newBuilder().setHeader( - newErrorHeader(Pdpb.ErrorType.UNKNOWN_VALUE, e2.getMessage())).build(); + getResponseHeader(ErrorType.UNKNOWN_VALUE, e2.getMessage())).build(); log.error("storeHeartbeat exception: ", e2); } observer.onNext(response); @@ -614,7 +817,7 @@ public void getPartition(Pdpb.GetPartitionRequest request, .setPartition(partShard.getPartition()) .setLeader(partShard.getLeader()).build(); } catch (PDException e) { - response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getPartition exception: ", e); } observer.onNext(response); @@ -642,7 +845,7 @@ public void getPartitionByCode(Pdpb.GetPartitionByCodeRequest request, .setPartition(partShard.getPartition()) .setLeader(partShard.getLeader()).build(); } catch (PDException e) { - response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getPartitionByCode exception: ", e); } observer.onNext(response); @@ -665,16 +868,21 @@ public void getPartitionByID(Pdpb.GetPartitionByIDRequest request, partitionService.getPartitionShardById(request.getGraphName(), request.getPartitionId()); if (partShard == null) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java throw 
new PDException(Pdpb.ErrorType.NOT_FOUND_VALUE, String.format("partition: %s-%s not found", request.getGraphName(), request.getPartitionId())); +======== + throw new PDException(ErrorType.NOT_FOUND_VALUE, + String.format("partition: %s-%s not found", request.getGraphName(), request.getPartitionId())); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } response = Pdpb.GetPartitionResponse.newBuilder().setHeader(okHeader) .setPartition(partShard.getPartition()) .setLeader(partShard.getLeader()).build(); } catch (PDException e) { - response = Pdpb.GetPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getPartitionByID exception: ", e); } observer.onNext(response); @@ -699,8 +907,12 @@ public void updatePartition(Pdpb.UpdatePartitionRequest request, response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.UpdatePartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("update partition exception: ", e); } observer.onNext(response); @@ -731,7 +943,7 @@ public void delPartition(Pdpb.DelPartitionRequest request, response = Pdpb.DelPartitionResponse.newBuilder().setHeader(okHeader).build(); } } catch (PDException e) { - response = Pdpb.DelPartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.DelPartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("delPartition exception: ", e); } observer.onNext(response); @@ -759,8 +971,12 
@@ public void scanPartitions(Pdpb.ScanPartitionsRequest request, response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(okHeader) .addAllPartitions(partShards).build(); } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.ScanPartitionsResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("scanPartitions exception: ", e); } observer.onNext(response); @@ -770,7 +986,10 @@ public void scanPartitions(Pdpb.ScanPartitionsRequest request, /** * Get graph information */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java public void getGraph(GetGraphRequest request, io.grpc.stub.StreamObserver observer) { if (!isLeader()) { @@ -786,12 +1005,12 @@ public void getGraph(GetGraphRequest request, response = Pdpb.GetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph) .build(); } else { - Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError( - Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.NOT_FOUND).build()).build(); + ResponseHeader header = ResponseHeader.newBuilder().setError( + Errors.newBuilder().setType(ErrorType.NOT_FOUND).build()).build(); response = Pdpb.GetGraphResponse.newBuilder().setHeader(header).build(); } } catch (PDException e) { - response = Pdpb.GetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getGraph exception: ", e); } observer.onNext(response); @@ -801,22 +1020,39 @@ public void 
getGraph(GetGraphRequest request, /** * Modify the diagram information */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public void setGraph(Pdpb.SetGraphRequest request, io.grpc.stub.StreamObserver observer) { +======== + public void setGraph(Pdpb.CreateGraphRequest request, + io.grpc.stub.StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getSetGraphMethod(), request, observer); return; } - Pdpb.SetGraphResponse response = null; + Pdpb.CreateGraphResponse response; Metapb.Graph graph = request.getGraph(); try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java graph = partitionService.updateGraph(graph); response = Pdpb.SetGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); +======== + var lastGraph = partitionService.getGraph(graph.getGraphName()); + if (lastGraph != null) { + graph = partitionService.updateGraphName(graph); + } else { + graph = partitionService.createGraph(graph.getGraphName(), + graph.getPartitionCount(), graph.getStoreGroupId()); + } + + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } catch (PDException e) { log.error("setGraph exception: ", e); - response = Pdpb.SetGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -842,7 +1078,7 @@ public void delGraph(Pdpb.DelGraphRequest request, .build(); } } catch (PDException e) { - response = Pdpb.DelGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = 
Pdpb.DelGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getGraph exception: ", e); } observer.onNext(response); @@ -878,6 +1114,7 @@ public void queryPartitions(Pdpb.QueryPartitionsRequest request, long storeId = query.getStoreId(); if (query.hasStoreId() && query.getStoreId() != 0) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java storeNodeService.getShardGroup(partition.getId()).getShardsList() .forEach(shard -> { if (shard.getStoreId() == storeId) { @@ -887,6 +1124,19 @@ public void queryPartitions(Pdpb.QueryPartitionsRequest request, } catch (PDException e) { log.error("query partitions error, req:{}, error:{}", request, e.getMessage()); +======== + var shardGroup = storeNodeService.getShardGroup(partition.getId()); + // 清理的时候,可能导致shard group被删除 + if (shardGroup != null) { + shardGroup.getShardsList().forEach(shard -> { + if (shard.getStoreId() == storeId) { + result.add(partition); + } + }); + } + }catch (PDException e){ + log.error("query partitions error, req:{}, error:{}", request, e.getMessage()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } } else { result.add(partition); @@ -948,6 +1198,7 @@ public void resetId(Pdpb.ResetIdRequest request, @Override public void getMembers(Pdpb.GetMembersRequest request, io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { redirectToLeader(PDGrpc.getGetMembersMethod(), request, observer); return; @@ -961,8 +1212,12 @@ public void getMembers(Pdpb.GetMembersRequest request, } catch (Exception e) { log.error("getMembers exception: ", e); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.GetMembersResponse.newBuilder() .setHeader(newErrorHeader(-1, e.getMessage())) +======== + response = Pdpb.GetMembersResponse.newBuilder().setHeader(getResponseHeader(-1, e.getMessage())) +>>>>>>>> 
d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java .build(); } observer.onNext(response); @@ -984,7 +1239,7 @@ public void getStoreStatus(Pdpb.GetAllStoresRequest request, Pdpb.GetAllStoresResponse.newBuilder().setHeader(okHeader).addAllStores(stores) .build(); } catch (PDException e) { - response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetAllStoresResponse.newBuilder().setHeader(getResponseHeader(e)).build(); log.error("getAllStores exception: ", e); } observer.onNext(response); @@ -1009,7 +1264,7 @@ public void getPDConfig(Pdpb.GetPDConfigRequest request, Pdpb.GetPDConfigResponse.newBuilder().setHeader(okHeader).setPdConfig(pdConfig) .build(); } catch (PDException e) { - response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetPDConfigResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1027,6 +1282,7 @@ public void setPDConfig(Pdpb.SetPDConfigRequest request, } Pdpb.SetPDConfigResponse response = null; try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (request.getPdConfig().getShardCount() % 2 != 1) { // Parity of the number of replicas throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, @@ -1052,12 +1308,37 @@ public void setPDConfig(Pdpb.SetPDConfigRequest request, if (!checkShardCount(newShardCount)) { throw new PDException(Pdpb.ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, "the cluster can't support so many shard count!"); +======== + if (request.getPdConfig().getShardCount() % 2 != 1){ + // 副本数奇偶校验 + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count must be an odd number!"); + } + if (request.getPdConfig().getShardCount() > + storeNodeService.getActiveStores().size()){ + // 不能大于活跃的store数量 + throw new 
PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "shard count can't be greater than the number of active stores!"); + } + int oldShardCount = configService.getPDConfig().getShardCount(); + int newShardCount = request.getPdConfig().getShardCount(); + if (newShardCount > oldShardCount){ + // 如果副本数增大,则检查store内部的资源是否够用 + if (! isResourceEnough(oldShardCount, newShardCount)) { + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "There is not enough disk space left!"); + } + + if (! checkShardCount(newShardCount)) { + throw new PDException(ErrorType.SET_CONFIG_SHARD_COUNT_ERROR_VALUE, + "the cluster can't support so many shard count!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } } configService.setPDConfig(request.getPdConfig()); response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.SetPDConfigResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1080,7 +1361,7 @@ public void getGraphSpace(Pdpb.GetGraphSpaceRequest request, response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(okHeader) .addAllGraphSpace(graphSpaces).build(); } catch (PDException e) { - response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = Pdpb.GetGraphSpaceResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1101,7 +1382,7 @@ public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, configService.setGraphSpace(request.getGraphSpace()); response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - response = Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = 
Pdpb.SetGraphSpaceResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1112,21 +1393,25 @@ public void setGraphSpace(Pdpb.SetGraphSpaceRequest request, * Data fragmentation * */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public void splitData(Pdpb.SplitDataRequest request, StreamObserver observer) { +======== + public void splitData(ClusterOp.SplitDataRequest request, StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getSplitDataMethod(), request, observer); return; } logService.insertLog(LogService.PARTITION_CHANGE, "splitData", request); - Pdpb.SplitDataResponse response = null; + ClusterOp.SplitDataResponse response = null; try { - taskService.splitPartition(request.getMode(), request.getParamList()); - response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); + taskService.splitPartition(request.getMode(), request.getStoreGroupId(), request.getParamList()); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - log.error("splitData exception {}", e); - response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("splitData exception:", e); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1134,21 +1419,33 @@ public void splitData(Pdpb.SplitDataRequest request, } @Override +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java public void splitGraphData(Pdpb.SplitGraphDataRequest request, StreamObserver observer) { +======== + public void splitGraphData(ClusterOp.SplitGraphDataRequest request, + StreamObserver observer) { +>>>>>>>> 
d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getSplitGraphDataMethod(), request, observer); return; } logService.insertLog(LogService.PARTITION_CHANGE, "splitGraphData", request); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java Pdpb.SplitDataResponse response; try { partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), request.getToCount()); response = Pdpb.SplitDataResponse.newBuilder().setHeader(okHeader).build(); +======== + ClusterOp.SplitDataResponse response ; + try { + partitionService.splitPartition(partitionService.getGraph(request.getGraphName()), request.getToCount()); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(okHeader).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } catch (PDException e) { - log.error("splitGraphData exception {}", e); - response = Pdpb.SplitDataResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("splitGraphData exception", e); + response = ClusterOp.SplitDataResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1157,22 +1454,34 @@ public void splitGraphData(Pdpb.SplitGraphDataRequest request, /** * Balance data between stores */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public void movePartition(Pdpb.MovePartitionRequest request, StreamObserver observer) { +======== + public void movePartition(ClusterOp.MovePartitionRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getMovePartitionMethod(), request, observer); return; } 
logService.insertLog(LogService.PARTITION_CHANGE, "balanceData", request); - Pdpb.MovePartitionResponse response = null; + + ClusterOp.MovePartitionResponse response = null; try { - taskService.patrolPartitions(); - taskService.balancePartitionShard(); - response = Pdpb.MovePartitionResponse.newBuilder().setHeader(okHeader).build(); + if (request.getMode() == ClusterOp.OperationMode.Auto) { + taskService.patrolPartitions(); + taskService.balancePartitionShard(request.getStoreGroupId()); + } else { + for (ClusterOp.MovePartitionParam p : request.getParamList()) { + partitionService.movePartitionsShard(p.getPartitionId(), p.getSrcStoreId(), p.getDstStoreId()); + } + } + response = ClusterOp.MovePartitionResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - log.error("transferData exception {}", e); - response = Pdpb.MovePartitionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("transferData exception", e); + response = ClusterOp.MovePartitionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); @@ -1190,10 +1499,29 @@ public void getClusterStats(Pdpb.GetClusterStatsRequest request, redirectToLeader(PDGrpc.getGetClusterStatsMethod(), request, observer); return; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java Pdpb.GetClusterStatsResponse response = null; response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) .setCluster(storeNodeService.getClusterStats()) .build(); +======== + Pdpb.GetClusterStatsResponse response; + + try { + Metapb.ClusterStats state; + if (request.getStoreId() != 0) { + state = storeNodeService.getClusterStats(request.getStoreId()); + } else { + state = storeNodeService.getClusterStats(request.getStoreGroup()); + } + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(okHeader) + .setCluster(state) + .build(); + } catch (PDException e) { + 
log.error("getClusterStats exception :", e); + response = Pdpb.GetClusterStatsResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java observer.onNext(response); observer.onCompleted(); } @@ -1203,9 +1531,14 @@ public void getClusterStats(Pdpb.GetClusterStatsRequest request, * Report the results of tasks such as partition splitting * */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public void reportTask(Pdpb.ReportTaskRequest request, io.grpc.stub.StreamObserver observer) { +======== + public void reportTask(ClusterOp.ReportTaskRequest request, + io.grpc.stub.StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getReportTaskMethod(), request, observer); return; @@ -1213,10 +1546,10 @@ public void reportTask(Pdpb.ReportTaskRequest request, try { taskService.reportTask(request.getTask()); } catch (Exception e) { - log.error("PDService.reportTask {}", e); + log.error("PDService.reportTask", e); } - Pdpb.ReportTaskResponse response = null; - response = Pdpb.ReportTaskResponse.newBuilder().setHeader(okHeader).build(); + ClusterOp.ReportTaskResponse response = null; + response = ClusterOp.ReportTaskResponse.newBuilder().setHeader(okHeader).build(); observer.onNext(response); observer.onCompleted(); } @@ -1239,14 +1572,20 @@ public void getPartitionStats(Pdpb.GetPartitionStatsRequest request, response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(okHeader) .setPartitionStats(stats).build(); } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("getPartitionStats exception {}", e); response = 
Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(newErrorHeader(e)) .build(); +======== + log.error("getPartitionStats exception ", e); + response = Pdpb.GetPartitionStatsResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public boolean isLeader() { @@ -1280,19 +1619,22 @@ public boolean isLeader() { // } //} +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java /** * Renewal peerList */ @Override - public void changePeerList(Pdpb.ChangePeerListRequest request, - io.grpc.stub.StreamObserver observer) { + public void changePeerList(ClusterOp.ChangePeerListRequest request, + io.grpc.stub.StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getChangePeerListMethod(), request, observer); return; } - Pdpb.getChangePeerListResponse response; + ClusterOp.ChangePeerListResponse response; try { Status status = RaftEngine.getInstance().changePeerList(request.getPeerList()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java Pdpb.ResponseHeader responseHeader = status.isOk() ? okHeader : newErrorHeader(status.getCode(), status.getErrorMsg()); @@ -1304,6 +1646,16 @@ public void changePeerList(Pdpb.ChangePeerListRequest request, response = Pdpb.getChangePeerListResponse.newBuilder() .setHeader(newErrorHeader(-1, e.getMessage())) .build(); +======== + ResponseHeader responseHeader = status.isOk() ? 
okHeader : getResponseHeader(status.getCode(), + status.getErrorMsg()); + response = ClusterOp.ChangePeerListResponse.newBuilder().setHeader(responseHeader).build(); + + } catch (Exception e) { + log.error("changePeerList exception: ", e); + response = ClusterOp.ChangePeerListResponse.newBuilder() + .setHeader(getResponseHeader(-1, e.getMessage())).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); @@ -1313,6 +1665,7 @@ public void changePeerList(Pdpb.ChangePeerListRequest request, public synchronized void onRaftLeaderChanged() { log.info("onLeaderChanged"); // channel = null; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java // TODO: uncomment later //if (licenseVerifierService == null) { // licenseVerifierService = new LicenseVerifierService(pdConfig); @@ -1323,26 +1676,42 @@ public synchronized void onRaftLeaderChanged() { PDWatchSubject.notifyNodeChange(NodeEventType.NODE_EVENT_TYPE_PD_LEADER_CHANGE, RaftEngine.getInstance().getLeaderGrpcAddress(), 0L); } catch (ExecutionException | InterruptedException e) { +======== + if (licenseVerifierService == null) { + licenseVerifierService = new LicenseVerifierService(pdConfig); + } + licenseVerifierService.init(); + + try { + PDPulseSubjects.notifyNodeChange(StoreNodeEventType.STORE_NODE_EVENT_TYPE_PD_LEADER_CHANGE, + RaftEngine.getInstance().getLeaderGrpcAddress(), 0L); + } catch (Exception e) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("failed to notice client", e); } } @Override - public void balanceLeaders(Pdpb.BalanceLeadersRequest request, - StreamObserver observer) { + public void balanceLeaders(ClusterOp.BalanceLeadersRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getBalanceLeadersMethod(), request, observer); 
return; } logService.insertLog(LogService.PARTITION_CHANGE, "balanceLeaders", request); - Pdpb.BalanceLeadersResponse response = null; + ClusterOp.BalanceLeadersResponse response = null; try { taskService.balancePartitionLeader(true); - response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build(); + response = ClusterOp.BalanceLeadersResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("balance Leaders exception: ", e); response = Pdpb.BalanceLeadersResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + log.error("balance Leaders exception ", e); + response = ClusterOp.BalanceLeadersResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); @@ -1368,16 +1737,27 @@ public void putLicense(PutLicenseRequest request, } FileUtils.writeByteArrayToFile(licenseFile, content, false); } catch (Exception e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("putLicense with error:", e); +======== + log.error("putLicense with error: ", e); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (moved) { try { FileUtils.moveFile(bakFile, licenseFile); } catch (IOException ex) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java log.error("failed to restore the license file:", ex); } } Pdpb.ResponseHeader header = newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); +======== + log.error("failed to restore the license file.", ex); + } + } + ResponseHeader header = getResponseHeader(ErrorType.LICENSE_ERROR_VALUE, e.getMessage()); +>>>>>>>> d7e3d51dd 
(3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.PutLicenseResponse.newBuilder().setHeader(header).build(); } responseObserver.onNext(response); @@ -1398,17 +1778,30 @@ public void delStore(Pdpb.DetStoreRequest request, if (Metapb.StoreState.Tombstone == store.getState()) { storeNodeService.removeStore(storeId); response = Pdpb.DetStoreResponse.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java .setHeader(okHeader) .setStore(store) .build(); } else { throw new PDException(Pdpb.ErrorType.STORE_PROHIBIT_DELETION_VALUE, "the store can't be deleted, please check store state!"); +======== + .setHeader(okHeader) + .setStore(store) + .build(); + }else{ + throw new PDException(ErrorType.STORE_PROHIBIT_DELETION_VALUE, + "the store can't be deleted, please check store state!"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } } catch (PDException e) { - log.error("delete store exception: {}", e); + log.error("delete store exception:", e); response = Pdpb.DetStoreResponse.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java .setHeader(newErrorHeader(e)).build(); +======== + .setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); @@ -1422,6 +1815,7 @@ public void delStore(Pdpb.DetStoreRequest request, */ private boolean checkShardCount(int newShardCount) { try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java var maxCount = pdConfig.getPartition().getMaxShardsPerStore() * storeNodeService.getActiveStores().size() / pdConfig.getConfigService().getPartitionCount(); @@ -1430,6 +1824,21 @@ private boolean 
checkShardCount(int newShardCount) { log.error("new shard count :{} exceed current cluster max shard count {}", newShardCount, maxCount); return false; +======== + var storeGroups = configService.getAllStoreGroup(); + var maxStoreShardCount = pdConfig.getPartition().getMaxShardsPerStore(); + // 检查每个分组是否可以容纳新分片数量 + for (var storeGroup : storeGroups) { + // 每个分组最大允许的shard数量 + int maxCount = storeNodeService.getActiveStoresByStoreGroup(storeGroup.getGroupId()).size() * + maxStoreShardCount / configService.getPartitionCount(storeGroup.getGroupId()); + + if (newShardCount > maxCount) { + log.error("new shard count :{} exceed current cluster max shard count {}", newShardCount, maxCount); + return false; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } } catch (Exception e) { log.error("checkShardCount: {}", e.getMessage()); @@ -1443,9 +1852,15 @@ private boolean checkShardCount(int newShardCount) { public boolean isResourceEnough(int oldShardCount, int newShardCount) { // Whether the resources of the active store are sufficient try { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java // The multiple of the storage space occupied float expansionRatio = newShardCount / oldShardCount; // The space currently occupied +======== + + double expansionRatio = newShardCount * 1.0 / oldShardCount; // 占用的存储空间膨胀的倍数 + // 当前占用的空间 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java long currentDataSize = 0L; // The space occupied after data bloat long newDataSize = 0L; @@ -1453,8 +1868,8 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) { long totalAvaible = 0L; // Statistics on the current storage space for (Metapb.Store store : storeNodeService.getStores()) { - List graphStatsList = store.getStats().getGraphStatsList(); - for (Metapb.GraphStats graphStats : graphStatsList) { + 
List graphStatsList = store.getStats().getGraphStatsList(); + for (GraphStats graphStats : graphStatsList) { currentDataSize += graphStats.getApproximateSize(); } } @@ -1479,35 +1894,41 @@ public boolean isResourceEnough(int oldShardCount, int newShardCount) { * Compaction on rocksdb * */ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public void dbCompaction(Pdpb.DbCompactionRequest request, StreamObserver observer) { +======== + public void dbCompaction(ClusterOp.DbCompactionRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getDbCompactionMethod(), request, observer); return; } logService.insertLog(LogService.TASK, "dbCompaction", request); - Pdpb.DbCompactionResponse response = null; + ClusterOp.DbCompactionResponse response = null; try { log.info("dbCompaction call dbCompaction"); taskService.dbCompaction(request.getTableName()); - response = Pdpb.DbCompactionResponse.newBuilder().setHeader(okHeader).build(); + response = ClusterOp.DbCompactionResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { - log.error("dbCompaction exception {}", e); - response = Pdpb.DbCompactionResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("dbCompaction exception", e); + response = ClusterOp.DbCompactionResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); } @Override - public void combineCluster(Pdpb.CombineClusterRequest request, - StreamObserver observer) { + public void combineCluster(ClusterOp.CombineClusterRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getCombineClusterMethod(), request, observer); return; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java 
Pdpb.CombineClusterResponse response; try { @@ -1516,27 +1937,47 @@ public void combineCluster(Pdpb.CombineClusterRequest request, } catch (PDException e) { response = Pdpb.CombineClusterResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + ClusterOp.CombineClusterResponse response ; + + try{ + partitionService.combinePartition(request.getStoreGroupId(), request.getToCount()); + response = ClusterOp.CombineClusterResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e){ + response = ClusterOp.CombineClusterResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); } + @Deprecated @Override - public void combineGraph(Pdpb.CombineGraphRequest request, - StreamObserver observer) { + public void combineGraph(ClusterOp.CombineGraphRequest request, + StreamObserver observer) { if (!isLeader()) { redirectToLeader(PDGrpc.getCombineGraphMethod(), request, observer); return; } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java Pdpb.CombineGraphResponse response; +======== + ClusterOp.CombineGraphResponse response ; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java try { partitionService.combineGraphPartition(request.getGraphName(), request.getToCount()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { response = Pdpb.CombineGraphResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = ClusterOp.CombineGraphResponse.newBuilder().setHeader(okHeader).build(); + }catch (PDException e){ + response = 
ClusterOp.CombineGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); @@ -1557,8 +1998,12 @@ public void deleteShardGroup(Pdpb.DeleteShardGroupRequest request, storeNodeService.deleteShardGroup(request.getGroupId()); response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = Pdpb.DeleteShardGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); @@ -1572,14 +2017,14 @@ public void getShardGroup(Pdpb.GetShardGroupRequest request, return; } Pdpb.GetShardGroupResponse response; - // TODO + try { Metapb.ShardGroup shardGroup = storeNodeService.getShardGroup(request.getGroupId()); response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(okHeader) .setShardGroup(shardGroup).build(); } catch (PDException e) { - log.error("getPartitionStats exception", e); - response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + log.error("getShardGroup exception", e); + response = Pdpb.GetShardGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); @@ -1598,12 +2043,20 @@ public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, try { var group = request.getShardGroup(); storeNodeService.updateShardGroup(group.getId(), group.getShardsList(), +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java group.getVersion(), group.getConfVer()); response = 
Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { log.error("updateShardGroup exception, ", e); response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + group.getVersion(), group.getConfVer()); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(okHeader).build(); + } catch (PDException e) { + log.error("updateShardGroup exception, ", e); + response = Pdpb.UpdateShardGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } responseObserver.onNext(response); @@ -1611,21 +2064,26 @@ public void updateShardGroup(Pdpb.UpdateShardGroupRequest request, } @Override +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java public void updateShardGroupOp(Pdpb.ChangeShardRequest request, StreamObserver observer) { +======== + public void updateShardGroupOp(ClusterOp.ChangeShardRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getUpdateShardGroupOpMethod(), request, observer); return; } - Pdpb.ChangeShardResponse response; + ClusterOp.ChangeShardResponse response; try { storeNodeService.shardGroupOp(request.getGroupId(), request.getShardsList()); - response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); } catch (PDException e) { log.error("changeShard exception, ", e); - response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); @@ -1633,35 +2091,50 @@ public void 
updateShardGroupOp(Pdpb.ChangeShardRequest request, } @Override +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java public void changeShard(Pdpb.ChangeShardRequest request, StreamObserver observer) { +======== + public void changeShard(ClusterOp.ChangeShardRequest request, + StreamObserver observer) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getChangeShardMethod(), request, observer); return; } - Pdpb.ChangeShardResponse response; + ClusterOp.ChangeShardResponse response; try { partitionService.changeShard(request.getGroupId(), request.getShardsList()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); +======== + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(okHeader).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } catch (PDException e) { log.error("changeShard exception, ", e); - response = Pdpb.ChangeShardResponse.newBuilder().setHeader(newErrorHeader(e)).build(); + response = ClusterOp.ChangeShardResponse.newBuilder().setHeader(getResponseHeader(e)).build(); } observer.onNext(response); observer.onCompleted(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java @Override public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, StreamObserver observer) { +======== + public void updatePdRaft(ClusterOp.UpdatePdRaftRequest request, + StreamObserver observer){ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!isLeader()) { redirectToLeader(PDGrpc.getUpdatePdRaftMethod(), request, observer); return; } +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java var list = parseConfig(request.getConfig()); log.info("update raft request: {}, list: {}", request.getConfig(), list); @@ -1672,6 +2145,17 @@ public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, do { var leaders = list.stream().filter(s -> s.getKey().equals("leader")) .collect(Collectors.toList()); +======== + var list = PeerUtil.parseConfig(request.getConfig()); + + log.info("update raft request: {}, list: {}", request.getConfig(), list); + + ClusterOp.UpdatePdRaftResponse response = ClusterOp.UpdatePdRaftResponse.newBuilder() + .setHeader(okHeader).build(); + + do { + var leaders = list.stream().filter(s -> s.getKey().equals("leader")).collect(Collectors.toList()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java var node = RaftEngine.getInstance().getRaftNode(); if (leaders.size() == 1) { @@ -1679,25 +2163,40 @@ public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, // change leader var peers = new HashSet<>(node.listPeers()); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (!peerEquals(leaderPeer, node.getLeaderId())) { +======== + if (!PeerUtil.isPeerEquals(leaderPeer, node.getLeaderId())) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java if (peers.contains(leaderPeer)) { log.info("updatePdRaft, transfer to {}", leaderPeer); node.transferLeadershipTo(leaderPeer); } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.UpdatePdRaftResponse.newBuilder() .setHeader(newErrorHeader(6667, "new leader" + " not in " + "raft peers")) .build(); +======== + response = ClusterOp.UpdatePdRaftResponse.newBuilder() + .setHeader(getResponseHeader(6667, "new leader not in raft peers")) + .build(); +>>>>>>>> d7e3d51dd 
(3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } break; } } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = Pdpb.UpdatePdRaftResponse.newBuilder() .setHeader(newErrorHeader(6666, "leader size != 1")) .build(); +======== + response = ClusterOp.UpdatePdRaftResponse.newBuilder() + .setHeader(getResponseHeader(6666, "leader size != 1")).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java break; } @@ -1718,8 +2217,12 @@ public void updatePdRaft(Pdpb.UpdatePdRaftRequest request, log.info("updatePdRaft, change peers success"); } else { log.error("changePeers status: {}, msg:{}, code: {}, raft error:{}", +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java status, status.getErrorMsg(), status.getCode(), status.getRaftError()); +======== + status, status.getErrorMsg(), status.getCode(), status.getRaftError()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } }); } while (false); @@ -1740,7 +2243,11 @@ public void getCache(GetGraphRequest request, .setHeader(okHeader).build(); } catch (PDException e) { log.error("get cache exception, ", e); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java response = CacheResponse.newBuilder().setHeader(newErrorHeader(e)).build(); +======== + response = CacheResponse.newBuilder().setHeader(getResponseHeader(e)).build(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } observer.onNext(response); observer.onCompleted(); @@ -1760,6 +2267,7 @@ public void getPartitions(GetGraphRequest request, observer.onCompleted(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java 
private List> parseConfig(String conf) { List> result = new LinkedList<>(); @@ -1792,4 +2300,524 @@ private boolean peerEquals(PeerId p1, PeerId p2) { } return Objects.equals(p1.getIp(), p2.getIp()) && Objects.equals(p1.getPort(), p2.getPort()); } +======== + + @Override + public void submitIndexTask(Pdpb.IndexTaskCreateRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getSubmitIndexTaskMethod(), request, observer); + return; + } + + var builder = Pdpb.TaskQueryResponse.newBuilder(); + var param = request.getParam(); + try { + var partitions = partitionService.getPartitions(param.getGraph()); + + if (partitions.isEmpty()) { + throw new PDException(PARTITION_NOT_EXISTS, "graph has no partition"); + } + + var newTaskId = idService.getId(USER_TASK_ID_KEY, 1); + + var taskInfo = storeNodeService.getTaskInfoMeta(); + for (var partition : partitions) { + var buildIndex = Metapb.BuildIndex.newBuilder() + .setPartitionId(partition.getId()) + .setTaskId(newTaskId) + .setParam(param) + .build(); + + var task = MetaTask.Task.newBuilder() + .setId(newTaskId) + .setType(MetaTask.TaskType.Build_Index) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setBuildIndex(buildIndex) + .build(); + + taskInfo.updateUserTask(task); + + log.info("notify client build index task: {}", buildIndex); + + PDPulseSubjects.notifyClient(PartitionHeartbeatResponse.newBuilder() + .setPartition(partition) + // 给store的task id + .setId(newTaskId) + .setBuildIndex(buildIndex)); + } + observer.onNext(builder.setHeader(okHeader).setTaskId(newTaskId).build()); + } catch (PDException e) { + log.error("IndexTaskGrpcService.submitTask", e); + observer.onNext(builder.setHeader(getResponseHeader(e)).build()); + } + observer.onCompleted(); + } + + @Override + public void submitBackupGraphTask(Pdpb.BackupGraphRequest request, + StreamObserver observer) { + if (!isLeader()) { + 
redirectToLeader(PDGrpc.getSubmitBackupGraphTaskMethod(), request, observer); + return; + } + + var builder = Pdpb.TaskQueryResponse.newBuilder(); + + try { + + var sourceGraph = partitionService.getGraph(request.getGraphName()); + var targetGraph = partitionService.getGraph(request.getTargetGraphName()); + + if (sourceGraph == null || targetGraph == null) { + throw new PDException(GRAPH_NOT_EXISTS, "source or target graph not exists"); + } + + var partitions = partitionService.getPartitions(request.getGraphName()); + var targetPartitions = partitionService.getPartitions(request.getTargetGraphName()); + + if (partitions.isEmpty()) { + throw new PDException(PARTITION_NOT_EXISTS, "source graph has no partition"); + } + + if (targetPartitions.isEmpty()) { + partitionService.allocGraphPartitions(targetGraph); + targetPartitions = partitionService.getPartitions(request.getTargetGraphName()); + } + + var newTaskId = idService.getId(USER_TASK_ID_KEY, 1); + + var taskInfo = storeNodeService.getTaskInfoMeta(); + + for (var partition : partitions) { + + SplitPartition.Builder splitBuilder = SplitPartition.newBuilder().addAllNewPartition(targetPartitions); + + var task = MetaTask.Task.newBuilder() + .setId(newTaskId) + .setType(MetaTask.TaskType.Backup_Graph) + .setState(MetaTask.TaskState.Task_Doing) + .setStartTimestamp(System.currentTimeMillis()) + .setPartition(partition) + .setSplitPartition(splitBuilder.build()) + .build(); + + taskInfo.updateUserTask(task); + + log.info("notify client backup graph: {} - {}", sourceGraph.getGraphName(), partition.getId()); + + PDPulseSubjects.notifyClient(PartitionHeartbeatResponse.newBuilder() + .setPartition(partition) + // 给store的task id + .setId(newTaskId) + .setSplitPartition(splitBuilder.build())); + } + + observer.onNext(builder.setHeader(okHeader).setTaskId(newTaskId).build()); + } catch (PDException e) { + log.error("IndexTaskGrpcService.submitTask", e); + observer.onNext(builder.setHeader(getResponseHeader(e)).build()); + } + 
observer.onCompleted(); + } + + @Override + public void queryTaskState(Pdpb.TaskQueryRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getQueryTaskStateMethod(), request, observer); + return; + } + + var taskInfo = storeNodeService.getTaskInfoMeta(); + var builder = Pdpb.TaskQueryResponse.newBuilder(); + + try { + var tasks = taskInfo.scanUserTask(request.getTaskId()); + + if (tasks.isEmpty()) { + throw new PDException(TASK_NOT_EXISTS, "task not found"); + } else { + var state = MetaTask.TaskState.Task_Success; + String message = "OK"; + int countOfSuccess = 0; + int countOfDoing = 0; + + for (var task : tasks) { + var state0 = task.getState(); + if (state0 == MetaTask.TaskState.Task_Failure) { + state = MetaTask.TaskState.Task_Failure; + message = task.getMessage(); + break; + } else if (state0 == MetaTask.TaskState.Task_Doing) { + state = MetaTask.TaskState.Task_Doing; + countOfDoing ++; + } else if (state0 == MetaTask.TaskState.Task_Success) { + countOfSuccess ++; + } + } + + if (state == MetaTask.TaskState.Task_Doing) { + message = "Doing/" + countOfDoing + ", Success/" + countOfSuccess; + } + + builder.setHeader(okHeader).setState(state).setMessage(message); + } + } catch (PDException e) { + builder.setHeader(getResponseHeader(e)); + } + + observer.onNext(builder.build()); + observer.onCompleted(); + } + + @Override + public void retryTask(Pdpb.TaskQueryRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getRetryTaskMethod(), request, observer); + return; + } + + var taskInfo = storeNodeService.getTaskInfoMeta(); + var builder = Pdpb.TaskQueryResponse.newBuilder(); + var taskId = request.getTaskId(); + + try { + var tasks = taskInfo.scanUserTask(taskId); + + if (tasks.isEmpty()) { + builder.setHeader(okHeader).setState(MetaTask.TaskState.Task_Failure).setMessage("task not found"); + } else { + var state = MetaTask.TaskState.Task_Success; + String message = "OK"; + for (var task : 
tasks) { + var state0 = task.getState(); + if (state0 == MetaTask.TaskState.Task_Failure || state0 == MetaTask.TaskState.Task_Doing) { + var partition = task.getPartition(); + log.info("notify client retry task: {}", task.getId()); + + var responseBuilder = PartitionHeartbeatResponse.newBuilder() + .setPartition(partition) + .setId(task.getId()); + if (task.hasBuildIndex()) { + responseBuilder.setBuildIndex(task.getBuildIndex()); + } else if (task.hasSplitPartition()) { + responseBuilder.setSplitPartition(task.getSplitPartition()); + } else { + throw new PDException(TASK_NOT_EXISTS, "task type not support"); + } + + PDPulseSubjects.notifyClient(responseBuilder); + } + } + builder.setHeader(okHeader).setState(state).setMessage(message); + } + } catch (PDException e) { + builder.setHeader(getResponseHeader(e)); + } + + observer.onNext(builder.build()); + observer.onCompleted(); + } + + @Override + public void getGraphStats(GetGraphRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetGraphStatsMethod(), request, observer); + return; + } + String graphName = request.getGraphName(); + GraphStatsResponse.Builder builder = GraphStatsResponse.newBuilder(); + try { + List stores = storeNodeService.getStores(graphName); + long dataSize = 0; + long keySize = 0; + for (Metapb.Store store : stores) { + List gss = store.getStats().getGraphStatsList(); + if (!gss.isEmpty()) { + String gssGraph = gss.get(0).getGraphName(); + String suffix = "/g"; + if (gssGraph.split("/").length > 2 && !graphName.endsWith(suffix)) { + graphName += suffix; + } + for (GraphStats gs : gss) { + boolean nameEqual = graphName.equals(gs.getGraphName()); + boolean roleEqual = Metapb.ShardRole.Leader.equals(gs.getRole()); + if (nameEqual && roleEqual) { + dataSize += gs.getApproximateSize(); + keySize += gs.getApproximateKeys(); + } + } + } + } + GraphStats stats = GraphStats.newBuilder().setApproximateSize(dataSize) + 
.setApproximateKeys(keySize).setGraphName(request.getGraphName()) + .build(); + builder.setStats(stats); + } catch (PDException e) { + builder.setHeader(getResponseHeader(e)); + } + observer.onNext(builder.build()); + observer.onCompleted(); + } + + @Override + public void getMembersAndClusterState(Pdpb.GetMembersRequest request, + io.grpc.stub.StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetMembersAndClusterStateMethod(), request, observer); + return; + } + Pdpb.MembersAndClusterState response; + try { + var stateList= storeNodeService.getAllClusterStats() + .entrySet().stream().map(entry -> Metapb.GroupClusterState.newBuilder() + .setStoreGroup(entry.getKey()) + .setState(entry.getValue()).build()) + .collect(Collectors.toList()); + + response = Pdpb.MembersAndClusterState.newBuilder() + .addAllMembers(RaftEngine.getInstance().getMembers()) + .setLeader(RaftEngine.getInstance().getLocalMember()) + .addAllState(stateList) + // .setState(storeNodeService.getClusterStats().getState()) + .build(); + + } catch (Exception e) { + log.error("getMembers exception: ", e); + response = Pdpb.MembersAndClusterState.newBuilder().setHeader(getResponseHeader(-1, e.getMessage())) + .build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void createGraph(Pdpb.CreateGraphRequest request, StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCreateGraphMethod(), request, observer); + return; + } + + Pdpb.CreateGraphResponse response; + Metapb.Graph graph = request.getGraph(); + try { + graph = partitionService.createGraph(graph.getGraphName(), + graph.getPartitionCount(), graph.getStoreGroupId()); + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(okHeader).setGraph(graph).build(); + } catch (PDException e) { + log.error("create exception: ", e); + response = Pdpb.CreateGraphResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + 
observer.onCompleted(); + } + + @Override + public void createStoreGroup(StoreGroup.CreateStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getCreateStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.CreateStoreGroupResponse response; + + try { + var storeGroup = configService.getStoreGroup(request.getGroupId()); + if (storeGroup == null) { + storeGroup = configService.createStoreGroup(request.getGroupId(), + request.getName(), request.getPartitionCount()); + storeNodeService.updateClusterStatus(request.getGroupId(), Metapb.ClusterState.Cluster_Not_Ready); + } else { + throw new PDException(STORE_GROUP_NOT_EXISTS.getNumber(), "Store Group exists"); + } + + response = StoreGroup.CreateStoreGroupResponse.newBuilder() + .setHeader(okHeader).setStoreGroup(storeGroup).build(); + } catch (PDException e) { + response = StoreGroup.CreateStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getStoreGroup(StoreGroup.GetStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.GetStoreGroupResponse response; + try { + var storeGroup = configService.getStoreGroup(request.getGroupId()); + if (storeGroup == null) { + throw new PDException(STORE_GROUP_NOT_EXISTS); + } + response = StoreGroup.GetStoreGroupResponse.newBuilder().setHeader(okHeader) + .setStoreGroup(storeGroup).build(); + } catch (PDException e) { + response = StoreGroup.GetStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getAllStoreGroup(StoreGroup.GetAllStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllStoreGroupMethod(), request, observer); + 
return; + } + + StoreGroup.GetAllStoreGroupResponse response; + + try { + var groupStoreList = configService.getAllStoreGroup(); + response = StoreGroup.GetAllStoreGroupResponse.newBuilder() + .setHeader(okHeader).addAllStoreGroups(groupStoreList).build(); + } catch (PDException e) { + response = StoreGroup.GetAllStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateStoreGroup(StoreGroup.UpdateStoreGroupRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getUpdateStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.UpdateStoreGroupResponse response; + + try { + var storeGroup = configService.updateStoreGroup(request.getGroupId(), request.getName()); + response = StoreGroup.UpdateStoreGroupResponse.newBuilder() + .setHeader(okHeader).setStoreGroup(storeGroup).build(); + } catch (PDException e) { + response = StoreGroup.UpdateStoreGroupResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getStoresByStoreGroup(StoreGroup.GetGroupStoresRequest request, + StreamObserver observer) { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetStoresByStoreGroupMethod(), request, observer); + return; + } + + StoreGroup.GetGroupStoresResponse response; + + try { + var stores = storeNodeService.getStoresByStoreGroup(request.getStoreGroupId()); + response = StoreGroup.GetGroupStoresResponse.newBuilder().addAllStores(stores).build(); + } catch (PDException e) { + response = StoreGroup.GetGroupStoresResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void updateStoreGroupRelation(StoreGroup.UpdateStoreGroupRelationRequest request, + StreamObserver observer) { + if (!isLeader()) { + 
redirectToLeader(PDGrpc.getUpdateStoreGroupRelationMethod(), request, observer); + return; + } + + StoreGroup.UpdateStoreGroupRelationResponse response; + + try { + // 没做过初始化 或者初始化过但是没有shard group的分配 + if (! storeNodeService.isStoreHasStoreGroup(request.getStoreId()) || storeNodeService.getShardGroups() + .stream().noneMatch(shardGroup -> { + for (var shard : shardGroup.getShardsList()) { + if (shard.getStoreId() == request.getStoreId()) { + return true; + } + } + return false; })) { + storeNodeService.updateStoreGroupRelation(request.getStoreId(), request.getStoreGroupId()); + response = StoreGroup.UpdateStoreGroupRelationResponse.newBuilder().setHeader(okHeader) + .setSuccess(true).setMessage("").build(); + } else { + response = StoreGroup.UpdateStoreGroupRelationResponse.newBuilder() + .setHeader(getResponseHeader(-1, "store has partitions yet")).build(); + } + } catch (PDException e) { + response = StoreGroup.UpdateStoreGroupRelationResponse.newBuilder().setHeader(getResponseHeader(e)).build(); + } + + observer.onNext(response); + observer.onCompleted(); + } + + @Override + public void getLeaderGrpcAddress(NoArg request, + StreamObserver observer) { + GetLeaderGrpcAddressResponse.Builder response = GetLeaderGrpcAddressResponse.newBuilder(); + try { + String grpcAddress = RaftEngine.getInstance().getLeaderGrpcAddress(false); + response.setHeader(okHeader).setAddress(grpcAddress); + } catch (PDException e) { + response.setHeader(getResponseHeader(e)); + } + observer.onNext(response.build()); + observer.onCompleted(); + } + + /** + */ + @Override + public void clearGrpcAddressCache(NoArg request, StreamObserver observer) { + VoidResponse.Builder response = VoidResponse.newBuilder(); + try { + RaftEngine.getInstance().clearGrpcAddresses(); + response.setHeader(okHeader); + } catch (Exception e) { + response.setHeader(getResponseHeader(ErrorType.ERROR_VALUE, e.getMessage())); + } + observer.onNext(response.build()); + observer.onCompleted(); + } + + @Override + 
public void getAllGrpcAddresses(NoArg request, + StreamObserver observer) { + boolean allows = pdConfig.isAllowsAddressAcquisition(); + Pdpb.GetAllGrpcAddressesResponse.Builder builder = + Pdpb.GetAllGrpcAddressesResponse.newBuilder().setAllowed(allows); + try { + if (!isLeader()) { + redirectToLeader(PDGrpc.getGetAllGrpcAddressesMethod(), request, observer); + return; + } + if (allows) { + List grpcAddresses = RaftEngine.getInstance().getPeerGrpcAddressesByCache(); + builder.addAllAddresses(grpcAddresses).setHeader(okHeader); + } + } catch (Exception e) { + log.error("getAllGrpcAddresses error", e); + builder.setHeader(getResponseHeader(ErrorType.ERROR_VALUE, e.getMessage())); + } + observer.onNext(builder.build()); + observer.onCompleted(); + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/service/PDService.java } diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java index 487444edde..d4aa1c0003 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,17 @@ */ package org.apache.hugegraph.pd.upgrade.scripts; +======== +package org.apache.hugegraph.pd.upgrade.scripts; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +import org.apache.hugegraph.pd.meta.MetadataRocksDBStore; +import org.apache.hugegraph.pd.upgrade.VersionUpgradeScript; +import lombok.extern.slf4j.Slf4j; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/upgrade/scripts/PartitionMetaUpgrade.java import java.util.HashSet; diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java index a1a297014b..e21b54512f 100644 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +++ b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,6 +16,8 @@ * limitations under the License. 
*/ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java package org.apache.hugegraph.pd.watch; import java.util.Arrays; @@ -36,12 +39,22 @@ import org.apache.hugegraph.pd.raft.RaftEngine; import org.apache.hugegraph.pd.store.RaftKVStore; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java +======== +import io.grpc.Status; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; import lombok.extern.slf4j.Slf4j; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java /** * Watch subscription and response processing classes +======== +/** watch订阅、响应处理类 + * @author zhangyingjie + * @date 2022/6/21 +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java **/ @Slf4j public class KvWatchSubject { @@ -49,12 +62,17 @@ public class KvWatchSubject { public static final String KEY_DELIMITER = "KW"; public static final String PREFIX_DELIMITER = "PW"; public static final String ALL_PREFIX = "W"; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java public static final long WATCH_TTL = 20000L; private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); private final KvService kvService; BiPredicate equal = String::equals; BiPredicate startWith = String::startsWith; +======== + public static final long WATCH_TTL = 1800000L; + private static final ConcurrentMap> clients = new ConcurrentHashMap<>(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java /** * The following three sets of keys will be used: @@ -101,7 +119,6 @@ public void addObserver(String key, long clientId, 
StreamObserver KvService.getKeyWithoutPrefix(ALL_PREFIX, delimiter, key, clientId); clients.putIfAbsent(keyWithoutPrefix, observer); addWatchKey(key, delimiter, clientId); - log.info("client:{},start to watch key:{}", clientId, key); } public void removeObserver(String key, long clientId, String delimiter) throws PDException { @@ -165,7 +182,11 @@ public void notifyObserver(String key, WatchType watchType, } else { log.info("cannot find StreamObserver for clientId:{}", clientId); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java } catch (StatusRuntimeException ignored) { +======== + } catch (StatusRuntimeException s) { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java } catch (Exception e) { log.warn("notifyObserver with error:{}", clientId, e); @@ -207,23 +228,33 @@ public void keepClientAlive() { value.onNext(testAlive); } Map clientKeys = kvService.scanWithPrefix(clientKey); - for (Map.Entry keyEntry : clientKeys.entrySet()) { + Set> set = clientKeys.entrySet(); + for (Map.Entry keyEntry : set) { String entryKey = keyEntry.getKey(); String aliveKey = entryKey.replaceFirst(removes, ""); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java boolean keepAliveKey = kvService.keepAlive(aliveKey); boolean keepAliveEntry = kvService.keepAlive(entryKey); // log.info("keep alive client:{},{}:{},{}:{}", client, aliveKey, // keepAliveKey, // entryKey, // keepAliveEntry); +======== + kvService.keepAlive(aliveKey); + kvService.keepAlive(entryKey); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java done = true; } break; } catch (Exception e) { + if (e instanceof StatusRuntimeException && + ((StatusRuntimeException) e).getStatus().getCode().equals(Status.Code.CANCELLED)) { + break; + } try { Thread.sleep(100); } catch 
(InterruptedException ex) { - log.info("keep alive client {} with error:{}", client, e); + } } } @@ -240,7 +271,10 @@ private void removeClient(StreamObserver value, String key, Strin if (RaftEngine.getInstance().isLeader()) { kvService.deleteWithPrefix(clientKey); } else { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java // todo: delete records via client +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/KvWatchSubject.java var store = kvService.getMeta().getStore(); if (store instanceof RaftKVStore) { ((RaftKVStore) store).doRemoveByPrefix(kvService.getStoreKey(clientKey)); @@ -249,7 +283,11 @@ private void removeClient(StreamObserver value, String key, Strin if (value != null) { synchronized (value) { - value.onCompleted(); + try{ + value.onCompleted(); + } catch (Exception e) { + + } } } clients.remove(key); diff --git a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java b/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java deleted file mode 100644 index 3b14372218..0000000000 --- a/hugegraph-pd/hg-pd-service/src/main/java/org/apache/hugegraph/pd/watch/PDWatchSubject.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hugegraph.pd.watch; - -import static org.apache.hugegraph.pd.common.HgAssert.isArgumentNotNull; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import javax.annotation.concurrent.ThreadSafe; - -import org.apache.hugegraph.pd.grpc.Metapb; -import org.apache.hugegraph.pd.grpc.watch.NodeEventType; -import org.apache.hugegraph.pd.grpc.watch.WatchChangeType; -import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest; -import org.apache.hugegraph.pd.grpc.watch.WatchRequest; -import org.apache.hugegraph.pd.grpc.watch.WatchResponse; -import org.apache.hugegraph.pd.grpc.watch.WatchType; - -import io.grpc.stub.StreamObserver; -import lombok.extern.slf4j.Slf4j; - -@Slf4j -@ThreadSafe -public class PDWatchSubject implements StreamObserver { - - public final static Map subjectHolder = new ConcurrentHashMap<>(); - private final static byte[] lock = new byte[0]; - - static { - subjectHolder.put(WatchType.WATCH_TYPE_PARTITION_CHANGE.name(), - new PartitionChangeSubject()); - subjectHolder.put(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name(), new NodeChangeSubject()); - subjectHolder.put(WatchType.WATCH_TYPE_GRAPH_CHANGE.name(), new NodeChangeSubject()); - subjectHolder.put(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name(), - new org.apache.hugegraph.pd.watch.ShardGroupChangeSubject()); - } - - private final StreamObserver responseObserver; - private AbstractWatchSubject subject; - private Long watcherId; - - private PDWatchSubject(StreamObserver responseObserver) { - this.responseObserver = responseObserver; - } - - 
public static StreamObserver addObserver( - StreamObserver responseObserver) { - isArgumentNotNull(responseObserver, "responseObserver"); - return new PDWatchSubject(responseObserver); - } - - /** - * Notify partition change - * - * @param changeType change type - * @param graph name of graph - * @param partitionId id of partition - */ - public static void notifyPartitionChange(ChangeType changeType, String graph, int partitionId) { - ((PartitionChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_PARTITION_CHANGE.name())) - .notifyWatcher(changeType.getGrpcType(), graph, partitionId); - - } - - public static void notifyShardGroupChange(ChangeType changeType, int groupId, - Metapb.ShardGroup group) { - ((org.apache.hugegraph.pd.watch.ShardGroupChangeSubject) subjectHolder.get( - WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE.name())) - .notifyWatcher(changeType.getGrpcType(), groupId, group); - } - - /** - * Notify store-node change - * - * @param changeType change type - * @param graph name of graph - * @param nodeId id of partition - */ - public static void notifyNodeChange(NodeEventType changeType, String graph, long nodeId) { - ((NodeChangeSubject) subjectHolder.get(WatchType.WATCH_TYPE_STORE_NODE_CHANGE.name())) - .notifyWatcher(changeType, graph, nodeId); - } - - public static void notifyChange(WatchType type, - WatchResponse.Builder builder) { - subjectHolder.get(type.name()).notifyWatcher(builder); - } - - public static void notifyError(int code, String message) { - subjectHolder.forEach((k, v) -> { - v.notifyError(code, message); - }); - } - - private static Long createWatcherId() { - synchronized (lock) { - Thread.yield(); - try { - Thread.sleep(1); - } catch (InterruptedException e) { - log.error("Failed to sleep", e); - } - - return System.currentTimeMillis(); - } - - } - - private void cancelWatcher() { - - if (this.subject == null) { - this.responseObserver.onError( - new Exception("Invoke cancel-watch before create-watch.")); - return; - } - - 
this.subject.removeObserver(this.watcherId, this.responseObserver); - } - - private WatchType getWatchType(WatchCreateRequest request) { - WatchType watchType = request.getWatchType(); - - if (watchType.equals(WatchType.WATCH_TYPE_UNKNOWN)) { - this.responseObserver.onError(new Exception("unknown watch type.")); - return null; - } - - return watchType; - } - - private AbstractWatchSubject getSubject(WatchType watchType) { - AbstractWatchSubject subject = subjectHolder.get(watchType.name()); - - if (subject == null) { - responseObserver.onError(new Exception("Unsupported watch-type: " + watchType.name())); - return null; - } - - return subject; - } - - private void addWatcher(WatchCreateRequest request) { - if (this.subject != null) { - return; - } - WatchType watchType = getWatchType(request); - if (watchType == null) { - return; - } - - this.subject = getSubject(watchType); - this.watcherId = createWatcherId(); - - this.subject.addObserver(this.watcherId, this.responseObserver); - } - - @Override - public void onNext(WatchRequest watchRequest) { - - if (watchRequest.hasCreateRequest()) { - this.addWatcher(watchRequest.getCreateRequest()); - return; - } - - if (watchRequest.hasCancelRequest()) { - this.cancelWatcher(); - } - - } - - @Override - public void onError(Throwable throwable) { - this.cancelWatcher(); - } - - @Override - public void onCompleted() { - this.cancelWatcher(); - } - - public enum ChangeType { - ADD(WatchChangeType.WATCH_CHANGE_TYPE_ADD), - ALTER(WatchChangeType.WATCH_CHANGE_TYPE_ALTER), - DEL(WatchChangeType.WATCH_CHANGE_TYPE_DEL), - - USER_DEFINED(WatchChangeType.WATCH_CHANGE_TYPE_SPECIAL1); - - private final WatchChangeType grpcType; - - ChangeType(WatchChangeType grpcType) { - this.grpcType = grpcType; - } - - public WatchChangeType getGrpcType() { - return this.grpcType; - } - } - -} diff --git a/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml index 228f4d0381..08eb667e0d 
100644 --- a/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml +++ b/hugegraph-pd/hg-pd-service/src/main/resources/log4j2.xml @@ -22,27 +22,33 @@ logs hugegraph-pd + raft-${FILE_NAME} + 6d + 2GB + 100 + 32 - - + + - - + + - - + + + diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java index 3aedfb117a..db85008f4f 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -15,12 +16,15 @@ * limitations under the License. */ +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java package org.apache.hugegraph.pd.client; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.MetaTask; import org.apache.hugegraph.pd.grpc.Metapb; @@ -29,6 +33,23 @@ import org.mockito.Mockito; // TODO: Exceptions should be thrown rather than silenced. 
+======== +import org.apache.hugegraph.pd.grpc.ClusterOp; +import lombok.extern.slf4j.Slf4j; +import org.junit.Test; +import org.mockito.Mockito; + +import org.apache.hugegraph.pd.client.listener.PDEventListener; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; + +/** + * @author zhengfuquan + * @date 2022/11/28 + **/ +@Slf4j +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java public class PDClientTest extends BaseClientTest { @Test @@ -75,7 +96,7 @@ public void testGetStore() { try { pdClient.getStore(0L); } catch (PDException e) { - e.printStackTrace(); + assert e.getErrorCode() == 101; } } @@ -85,7 +106,6 @@ public void testUpdateStore() { try { pdClient.updateStore(store); } catch (PDException e) { - e.printStackTrace(); } } @@ -228,7 +248,7 @@ public void testGetClusterStats() { @Test public void testAddEventListener() { - PDClient.PDEventListener listener = Mockito.mock(PDClient.PDEventListener.class); + PDEventListener listener = Mockito.mock(PDEventListener.class); pdClient.addEventListener(listener); } @@ -281,16 +301,16 @@ public void testGetGraphSpace() { @Test public void testSetPDConfig() { try { - pdClient.setPDConfig(0, "", 0, 0L); + pdClient.setPDConfig("", 0, 0L); } catch (PDException e) { - e.printStackTrace(); + assert e.getErrorCode() == 112; } Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().build(); try { pdClient.setPDConfig(pdConfig); } catch (PDException e) { - e.printStackTrace(); + assert e.getErrorCode() == 112; } } @@ -308,7 +328,7 @@ public void testChangePeerList() { try { pdClient.changePeerList(""); } catch (PDException e) { - e.printStackTrace(); + assert e.getErrorCode() == -1; } } @@ -316,11 +336,17 @@ public void testChangePeerList() { public void testSplitData() { try { Metapb.PDConfig config = pdClient.getPDConfig(); +<<<<<<<< 
HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java pdClient.setPDConfig(config.toBuilder().setMaxShardsPerStore(12).build()); +======== + pdClient.setPDConfig(config.toBuilder() + .setMaxShardsPerStore(12) + .build()); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/PDClientTest.java System.out.println(pdClient.getPDConfig()); pdClient.splitData(); - } catch (Exception e) { - e.printStackTrace(); + } catch (PDException e) { + log.error("testSplitData", e); } } @@ -329,16 +355,16 @@ public void testBalancePartition() { try { pdClient.balancePartition(); } catch (PDException e) { - e.printStackTrace(); + } } @Test public void testMovePartition() { - Pdpb.OperationMode mode = Pdpb.OperationMode.Auto; - List params = new ArrayList<>(1); + ClusterOp.OperationMode mode = ClusterOp.OperationMode.Auto; + List params = new ArrayList<>(1); try { - pdClient.movePartition(mode, params); + pdClient.balancePartition(); } catch (PDException e) { e.printStackTrace(); } @@ -359,7 +385,7 @@ public void testBalanceLeaders() { try { pdClient.balanceLeaders(); } catch (PDException e) { - e.printStackTrace(); + assert e.getErrorCode() == 1001; } } @@ -368,7 +394,6 @@ public void testDelStore() { try { pdClient.delStore(0L); } catch (PDException e) { - e.printStackTrace(); } } diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java index 55e59d574e..060c7d8469 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * 
contributor license agreements. See the NOTICE file distributed with @@ -16,10 +17,29 @@ */ package org.apache.hugegraph.pd.client; +======== +package org.apache.hugegraph.pd; + +import org.apache.hugegraph.pd.client.PDClient; +import org.apache.hugegraph.pd.client.PDConfig; +import org.apache.hugegraph.pd.pulse.Pulse; +import org.apache.hugegraph.pd.common.KVPair; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest; +import org.apache.hugegraph.pd.grpc.pulse.PulseResponse; +import org.apache.hugegraph.pd.pulse.PulseListener; +import org.apache.hugegraph.pd.pulse.PulseNotifier; +import org.apache.hugegraph.pd.pulse.PulseServerNotice; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java import java.nio.charset.StandardCharsets; import java.util.List; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java import org.apache.hugegraph.pd.common.KVPair; import org.apache.hugegraph.pd.common.PDException; import org.apache.hugegraph.pd.grpc.Metapb; @@ -34,13 +54,24 @@ public class StoreRegisterTest { private static PDClient pdClient; +======== +public class StoreRegisterTest { + private static PDClient pdClient; + private static PDConfig config; + private long storeId = 0; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java private final String storeAddr = "localhost"; private final String graphName = "default/hugegraph/g"; private long storeId = 0; @BeforeClass +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java public static void beforeClass() { PDConfig config = PDConfig.of("localhost:8686"); +======== + public static void beforeClass() throws 
Exception { + config = PDConfig.of("localhost:8686"); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java config.setEnableCache(true); pdClient = PDClient.create(config); } @@ -92,10 +123,17 @@ public void testStoreHeartbeat() throws PDException { @Test public void testPartitionHeartbeat() throws PDException { testRegisterStore(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/client/StoreRegisterTest.java PDPulse pdPulse = new PDPulseImpl(pdClient.getLeaderIp()); PDPulse.Notifier notifier = pdPulse.connectPartition( new PDPulse.Listener<>() { +======== + Pulse pdPulse = pdClient.getPulse(); + + PulseNotifier notifier = pdPulse.connect( + new PulseListener() { +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-client/src/test/java/org/apache/hugegraph/pd/StoreRegisterTest.java @Override public void onNext(PulseResponse response) { diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java index 3e61dd0a94..c3994214f7 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -14,6 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ +======== +package org.apache.hugegraph.pd.common; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java package org.apache.hugegraph.pd.common; diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java index 7ac5509bb1..63a8d32b78 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -21,13 +22,30 @@ import org.apache.hugegraph.pd.ConfigService; import org.apache.hugegraph.pd.IdService; +======== +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.common.PDException; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java import org.apache.hugegraph.pd.config.PDConfig; import org.apache.hugegraph.pd.grpc.Metapb; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/ConfigServiceTest.java public class ConfigServiceTest extends PDCoreTestBase { +======== +import java.util.List; + +import static org.junit.Assert.assertEquals; + +public class ConfigServiceTest { + + private PDConfig config = BaseServerTest.getConfig(); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/ConfigServiceTest.java private ConfigService service; @@ -42,7 +60,6 @@ public 
void testGetPDConfig() { try { final Metapb.PDConfig config = Metapb.PDConfig.newBuilder() .setVersion(0L) - .setPartitionCount(0) .setShardCount(55) .setMaxShardsPerStore(0) .setTimestamp(0L).build(); @@ -81,7 +98,6 @@ public void testUpdatePDConfig() { try { final Metapb.PDConfig mConfig = Metapb.PDConfig.newBuilder() .setVersion(0L) - .setPartitionCount(0) .setShardCount(0) .setMaxShardsPerStore(0) .setTimestamp(0L) @@ -102,4 +118,27 @@ public void testUpdatePDConfig() { e.printStackTrace(); } } + + @Test + public void testStoreGroup() throws PDException { + config.setInitialStoreList("192.168.1.1:8500,192.168.1.1:8501,192.168.1.2:8500/1"); + service.loadConfig(); + + assertEquals(2, service.getAllStoreGroup().size()); + var group1 = service.getStoreGroup(0); + assertEquals(24, group1.getPartitionCount()); + + var group2 = service.getStoreGroup(1); + assertEquals(12, group2.getPartitionCount()); + + service.updateStoreGroup(0, "DEFAULT"); + service.setPartitionCount(0, 36); + + group1 = service.getStoreGroup(0); + assertEquals(36, group1.getPartitionCount()); + assertEquals("DEFAULT", group1.getName()); + + service.createStoreGroup(2, "group2", 12); + assertEquals(3, service.getAllStoreGroup().size()); + } } diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java index 9e7b03d98e..a5cd15cbb4 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -46,11 +47,57 @@ public class PDCoreTestBase { private static final String DATA_PATH = "/tmp/pd_data"; private static PDConfig pdConfig; +======== +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.listener.PartitionInstructionListener; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.listener.PartitionStatusListener; +import org.apache.hugegraph.pd.StoreMonitorDataService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.TaskScheduleService; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.grpc.pulse.ChangeShard; +import org.apache.hugegraph.pd.grpc.pulse.CleanPartition; +import org.apache.hugegraph.pd.grpc.pulse.DbCompaction; +import org.apache.hugegraph.pd.grpc.pulse.MovePartition; +import org.apache.hugegraph.pd.grpc.pulse.PartitionKeyRange; +import org.apache.hugegraph.pd.grpc.pulse.SplitPartition; +import org.apache.hugegraph.pd.grpc.pulse.TransferLeader; +import org.apache.hugegraph.pd.raft.RaftEngine; +import lombok.Getter; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.File; + +public class PdTestBase { + @Getter + private static PDConfig pdConfig; + + @Getter +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java private static StoreNodeService storeNodeService; + @Getter private static PartitionService partitionService; + @Getter private static TaskScheduleService taskService; + @Getter private static StoreMonitorDataService storeMonitorDataService; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java +======== + 
@Getter + private static ConfigService configService; + + private static final String DATA_PATH = "/tmp/pd_data"; + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java @BeforeClass public static void initService() throws PDException { deleteDir(new File(DATA_PATH)); @@ -75,14 +122,13 @@ public static void initService() throws PDException { config.setStore(new PDConfig().new Store()); config.setPartition(new PDConfig().new Partition() {{ setShardCount(1); - setTotalCount(12); setMaxShardsPerStore(12); }}); config.setDiscovery(new PDConfig().new Discovery()); pdConfig = config; - var configService = new ConfigService(pdConfig); + configService = new ConfigService(pdConfig); configService.loadConfig(); var engine = RaftEngine.getInstance(); @@ -91,8 +137,8 @@ public static void initService() throws PDException { engine.waitingForLeader(5000); storeNodeService = new StoreNodeService(pdConfig); - partitionService = new PartitionService(pdConfig, storeNodeService); - taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService); + partitionService = new PartitionService(pdConfig, storeNodeService, configService); + taskService = new TaskScheduleService(pdConfig, storeNodeService, partitionService, configService); var idService = new IdService(pdConfig); storeMonitorDataService = new StoreMonitorDataService(pdConfig); RaftEngine.getInstance().addStateListener(partitionService); @@ -197,6 +243,7 @@ private static boolean deleteDir(File dir) { return dir.delete(); } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/PDCoreTestBase.java public static StoreNodeService getStoreNodeService() { return storeNodeService; } @@ -216,4 +263,6 @@ public static TaskScheduleService getTaskService() { public static StoreMonitorDataService getStoreMonitorDataService() { return storeMonitorDataService; } +======== +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/PdTestBase.java } diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java index a57b95f0c3..16d9ba8838 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -17,6 +18,21 @@ package org.apache.hugegraph.pd.core; +======== +package org.apache.hugegraph.pd.service; + +import org.apache.hugegraph.pd.ConfigService; +import org.apache.hugegraph.pd.IdService; +import org.apache.hugegraph.pd.PartitionService; +import org.apache.hugegraph.pd.StoreNodeService; +import org.apache.hugegraph.pd.listener.StoreStatusListener; +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.hugegraph.pd.grpc.MetaTask; +import org.apache.hugegraph.pd.grpc.Metapb; + +import static org.apache.hugegraph.pd.common.Consts.DEFAULT_STORE_GROUP_ID; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.mock; @@ -52,10 +68,17 @@ public void setUp() { @Test public void testInit() { // Setup +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java PDConfig pdConfig = getPdConfig(); final PartitionService partitionService = new PartitionService(pdConfig, new StoreNodeService( 
pdConfig)); +======== + PDConfig pdConfig = getConfig(); + final PDConfig pdConfig1 = getConfig(); + final PartitionService partitionService = new PartitionService(pdConfig, new StoreNodeService(pdConfig1), + new ConfigService(pdConfig1)); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java // Run the test this.service.init(partitionService); @@ -63,11 +86,41 @@ public void testInit() { // Verify the results } +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java +======== + private PDConfig getConfig() { + PDConfig pdConfig = new PDConfig(); + pdConfig.setConfigService( + new ConfigService(BaseServerTest.getConfig())); + pdConfig.setIdService(new IdService(BaseServerTest.getConfig())); + pdConfig.setClusterId(0L); + pdConfig.setPatrolInterval(0L); + pdConfig.setDataPath("dataPath"); + pdConfig.setMinStoreCount(0); + pdConfig.setInitialStoreList("initialStoreList"); + pdConfig.setHost("host"); + pdConfig.setVerifyPath("verifyPath"); + pdConfig.setLicensePath("licensePath"); + PDConfig.Raft raft = new PDConfig().new Raft(); + raft.setEnable(false); + pdConfig.setRaft(raft); + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setShardCount(0); + pdConfig.setPartition(partition); + pdConfig.setInitialStoreMap(Map.ofEntries(Map.entry("value", "value"))); + return pdConfig; + } + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java @Test public void testIsOK() { // Setup // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java final boolean result = this.service.isOK(); +======== + final boolean result = service.isOK(DEFAULT_STORE_GROUP_ID); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java // Verify the results 
assertThat(result).isTrue(); @@ -255,7 +308,6 @@ public void testUpdateStore() throws Exception { // Configure PDConfig.getPartition(...). final PDConfig.Partition partition = new PDConfig().new Partition(); - partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); @@ -287,11 +339,18 @@ public void testStoreTurnoff() throws Exception { .setDataVersion(0).setCores(0) .setDataPath("dataPath").build(); +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java // Configure PDConfig.getPartition(...). final PDConfig.Partition partition = new PDConfig().new Partition(); partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); +======== + // Configure PDConfig.getPartition(...). + final PDConfig.Partition partition = new PDConfig().new Partition(); + partition.setMaxShardsPerStore(0); + partition.setShardCount(0); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java // Run the test this.service.storeTurnoff(store); @@ -453,7 +512,11 @@ public void testGetActiveStores1() throws Exception { .setDataPath("dataPath").build()); // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java final List result = this.service.getActiveStores("graphName"); +======== + final List result = service.getActiveStoresByStoreGroup(DEFAULT_STORE_GROUP_ID); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java // Verify the results } @@ -471,6 +534,7 @@ public void testGetActiveStores1ThrowsPDException() { @Ignore // state is Pending instead of Tombstone @Test public void testGetTombStores() throws Exception { +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java // Setup final List storeList = List.of( 
Metapb.Store.newBuilder().setId(0L).setAddress("address") @@ -499,6 +563,36 @@ public void testGetTombStores() throws Exception { this.service.removeStore(result.get(0).getId()); List stores = this.service.getStores(); assertThat(stores.size()).isEqualTo(0); +======== + //// Setup + //final List storeList = List.of( + // Metapb.Store.newBuilder().setId(0L).setAddress("address") + // .setRaftAddress("raftAddress") + // .addLabels(Metapb.StoreLabel.newBuilder().build()) + // .setVersion("version") + // .setState(Metapb.StoreState.Tombstone) + // .setStartTimestamp(0L).setDeployPath("deployPath") + // .setLastHeartbeat(0L).setStats( + // Metapb.StoreStats.newBuilder().setStoreId(0L) + // .setPartitionCount(0).addGraphStats( + // Metapb.GraphStats.newBuilder() + // .setGraphName("value") + // .setApproximateSize(0L) + // .setRole(Metapb.ShardRole.None) + // .build()).build()) + // .setDataVersion(0).setCores(0) + // .setDataPath("dataPath").build()); + //service.register(storeList.get(0)); + // + //// Run the test + //final List result = service.getTombStores(); + // + //// Verify the results + //assertThat(result.size() == 1); + //service.removeStore(result.get(0).getId()); + //List stores = service.getStores(); + //assertThat(stores.size() == 0); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java } @Test @@ -522,7 +616,6 @@ public void testAllocShards() throws Exception { // Configure PDConfig.getPartition(...). final PDConfig.Partition partition = new PDConfig().new Partition(); - partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); @@ -557,7 +650,6 @@ public void testReallocShards() throws Exception { // Configure PDConfig.getPartition(...). 
final PDConfig.Partition partition = new PDConfig().new Partition(); - partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); when(this.config.getPartition()).thenReturn(partition); @@ -623,7 +715,6 @@ public void testHeartBeat() throws Exception { // Configure PDConfig.getPartition(...). final PDConfig.Partition partition = new PDConfig().new Partition(); - partition.setTotalCount(0); partition.setMaxShardsPerStore(0); partition.setShardCount(0); when(this.config.getPartition()).thenReturn(partition); @@ -639,26 +730,34 @@ public void testHeartBeat() throws Exception { } @Test - public void testUpdateClusterStatus1() { + public void testUpdateClusterStatus1() throws PDException { // Setup final Metapb.ClusterStats expectedResult = Metapb.ClusterStats .newBuilder().setState(Metapb.ClusterState.Cluster_OK) .setMessage("message").setTimestamp(0L).build(); // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java final Metapb.ClusterStats result = this.service.updateClusterStatus( +======== + final Metapb.ClusterStats result = service.updateClusterStatus(DEFAULT_STORE_GROUP_ID, +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java Metapb.ClusterState.Cluster_OK); } @Test - public void testUpdateClusterStatus2() { + public void testUpdateClusterStatus2() throws PDException { // Setup final Metapb.ClusterStats expectedResult = Metapb.ClusterStats .newBuilder().setState(Metapb.ClusterState.Cluster_OK) .setMessage("message").setTimestamp(0L).build(); // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java final Metapb.ClusterStats result = this.service.updateClusterStatus( +======== + final Metapb.ClusterStats result = service.updateClusterStatus( DEFAULT_STORE_GROUP_ID, +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x 
diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java Metapb.PartitionState.PState_None); } @@ -666,7 +765,11 @@ public void testUpdateClusterStatus2() { public void testCheckStoreStatus() { // Setup // Run the test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/StoreServiceTest.java this.service.checkStoreStatus(); +======== + service.checkStoreStatus(DEFAULT_STORE_GROUP_ID); +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/StoreServiceTest.java // Verify the results } diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java index 03aa0e7856..e8e90949f4 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/meta/MetadataKeyHelperTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -17,6 +18,12 @@ package org.apache.hugegraph.pd.core.meta; +======== +package org.apache.hugegraph.pd.common; + +import org.apache.hugegraph.pd.grpc.Metapb; +import org.apache.hugegraph.pd.meta.MetadataKeyHelper; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/MetadataKeyHelperTest.java import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertArrayEquals; diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/store/HgKVStoreImplTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/store/HgKVStoreImplTest.java index 5e77b6a829..12a26013c7 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/store/HgKVStoreImplTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/store/HgKVStoreImplTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/core/store/HgKVStoreImplTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,16 @@ */ package org.apache.hugegraph.pd.core.store; +======== +package org.apache.hugegraph.pd.store; + +import org.apache.hugegraph.pd.common.PDException; +import org.apache.hugegraph.pd.config.PDConfig; +import org.apache.commons.io.FileUtils; +import org.junit.Assert; +import org.junit.BeforeClass; +// import org.junit.Test; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-core/src/test/java/org/apache/hugegraph/pd/store/HgKVStoreImplTest.java import java.io.File; import java.io.IOException; diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java index 813d7f0656..06ad865eb9 100644 --- a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java +++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java @@ -1,3 +1,4 @@ +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. 
See the NOTICE file distributed with @@ -16,6 +17,9 @@ */ package org.apache.hugegraph.pd.rest; +======== +package org.apache.hugegraph.pd.service; +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java import java.io.IOException; import java.net.URI; @@ -27,6 +31,13 @@ import org.json.JSONObject; import org.junit.Test; +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java +======== +/** + * @author tianxiaohui + * @date 20221220 + **/ +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java public class RestApiTest extends BaseServerTest { @Test @@ -34,7 +45,11 @@ public void testQueryClusterInfo() throws URISyntaxException, IOException, Inter JSONException { String url = pdRestAddr + "/v1/cluster"; HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); @@ -43,11 +58,19 @@ public void testQueryClusterInfo() throws URISyntaxException, IOException, Inter } @Test +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java public void testQueryClusterMembers() throws URISyntaxException, IOException, InterruptedException, JSONException { String url = pdRestAddr + "/v1/members"; HttpRequest request = HttpRequest.newBuilder() .uri(new URI(url)) +======== + public void testQueryClusterMembers() throws URISyntaxException, IOException, InterruptedException, + JSONException { + String url = pdRestAddr + "/v1/members"; + HttpRequest request = HttpRequest.newBuilder() + .uri(new URI(url)).header(key, value) 
+>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); @@ -60,7 +83,11 @@ public void testQueryStoresInfo() throws URISyntaxException, IOException, Interr JSONException { String url = pdRestAddr + "/v1/stores"; HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); @@ -73,7 +100,11 @@ public void testQueryGraphsInfo() throws IOException, InterruptedException, JSON URISyntaxException { String url = pdRestAddr + "/v1/graphs"; HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); @@ -86,7 +117,11 @@ public void testQueryPartitionsInfo() throws IOException, InterruptedException, URISyntaxException { String url = pdRestAddr + "/v1/highLevelPartitions"; HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); @@ 
-99,7 +134,11 @@ public void testQueryDebugPartitionsInfo() throws URISyntaxException, IOExceptio InterruptedException { String url = pdRestAddr + "/v1/partitions"; HttpRequest request = HttpRequest.newBuilder() +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java .uri(new URI(url)) +======== + .uri(new URI(url)).header(key, value) +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java .GET() .build(); HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); @@ -112,8 +151,15 @@ public void testQueryShards() throws URISyntaxException, IOException, Interrupte String url = pdRestAddr + "/v1/shards"; HttpRequest request = HttpRequest.newBuilder() .uri(new URI(url)) +<<<<<<<< HEAD:hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/rest/RestApiTest.java .GET() .build(); +======== + .header(key, value) + .GET() + .build(); + +>>>>>>>> d7e3d51dd (3.6.5 -> 4.x diff):hg-pd-test/src/main/java/org/apache/hugegraph/pd/service/RestApiTest.java HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); JSONObject obj = new JSONObject(response.body()); assert obj.getInt("status") == 0; diff --git a/start_pd_server.sh b/start_pd_server.sh new file mode 100644 index 0000000000..7bd0643fb4 --- /dev/null +++ b/start_pd_server.sh @@ -0,0 +1,44 @@ +AGILE_PRODUCT_HTTP_URL=https://irepo.baidu-int.com/rest/prod/v3/baidu/starhugegraph/hugegraph-pd/nodes/54389398/files +AGILE_COMPILE_BRANCH=3.7.1 +DEPLOY_PATH=/home/disk3/store_ci/${AGILE_COMPILE_BRANCH} +PD_PATH=${DEPLOY_PATH}/pd/ +STORE_PATH=${DEPLOY_PATH}/store/ +function rename() +{ + cfilelist=$(find -maxdepth 1 -type d -printf '%f\n' ) + for cfilename in $cfilelist + do + if [[ $cfilename =~ SNAPSHOT ]] + then + mv $cfilename ${cfilename/-?.?.?-SNAPSHOT/} + fi + done +} +# kill -9 $(jps -mlv|grep ${DEPLOY_PATH}/pd/${AGILE_COMPILE_BRANCH}|grep -v grep|awk '{print $1}') 
+echo "stop server...." +kill -9 $(jps -mlv|grep ${DEPLOY_PATH}|grep -v grep|awk '{print $1}') +echo "stopped server" +mkdir -p ${PD_PATH} +cd ${PD_PATH} +echo "get pd...." +wget -q -O output.tar.gz --no-check-certificate --header "IREPO-TOKEN:c7404132-b76e-4f48-b77f-478286cbdfb8" ${AGILE_PRODUCT_HTTP_URL} +tar -zxf output.tar.gz +cd output +rm -rf hugegraph-pd +echo "unzip pd tar...." +find . -name "*.tar.gz" -exec tar -zxvf {} \; +rename +# start pd +echo "changing pd application.yml...." +cd ${PD_PATH}/output/hugegraph-pd +sed -i 's/initial-store-list:.*/initial-store-list: 10.108.17.32:8500\n initial-store-count: 1/' conf/application.yml +sed -i 's/,127.0.0.1:8611,127.0.0.1:8612//' conf/application.yml +bin/start-hugegraph-pd.sh +sleep 10 +echo "start pd end" +mkdir -p ${STORE_PATH}/output +cd ${STORE_PATH}/output +rm -rf hugegraph-store/storage +hugegraph-store/bin/start-hugegraph-store.sh +sleep 10 +echo "start store end" \ No newline at end of file diff --git a/start_store_server.sh b/start_store_server.sh new file mode 100644 index 0000000000..94935044c3 --- /dev/null +++ b/start_store_server.sh @@ -0,0 +1,44 @@ +AGILE_PRODUCT_HTTP_URL=https://irepo.baidu-int.com/rest/prod/v3/baidu/starhugegraph/hugegraph-store/nodes/54385742/files +AGILE_COMPILE_BRANCH=3.7.1 +DEPLOY_PATH=/home/disk3/store_ci/${AGILE_COMPILE_BRANCH} +PD_PATH=${DEPLOY_PATH}/pd/ +STORE_PATH=${DEPLOY_PATH}/store/ +function rename() +{ + cfilelist=$(find -maxdepth 1 -type d -printf '%f\n' ) + for cfilename in $cfilelist + do + if [[ $cfilename =~ SNAPSHOT ]] + then + mv $cfilename ${cfilename/-?.?.?-SNAPSHOT/} + fi + done +} +echo "stop server...." +kill -9 $(jps -mlv|grep ${DEPLOY_PATH}|grep -v grep|awk '{print $1}') +echo "stopped server" +mkdir -p ${PD_PATH} +cd ${PD_PATH}/output/hugegraph-pd +rm -rf pd_data +bin/start-hugegraph-pd.sh +# start store +mkdir -p ${STORE_PATH} +cd ${STORE_PATH} +echo "get store...." 
+wget -q -O output.tar.gz --no-check-certificate --header "IREPO-TOKEN:1fc56829-86d9-4c81-bb4c-9fabb7ea873d" ${AGILE_PRODUCT_HTTP_URL} +echo "unzip store tar...." +tar -zxf output.tar.gz +cd output +rm -rf hugegraph-store +find . -name "*.tar.gz" -exec tar -zxvf {} \; +rename +pushd hugegraph-store +echo "changing store application.yml...." +sed -i 's#local os=`uname`#local os=Linux#g' bin/util.sh +sed -i 's/export LD_PRELOAD/#export LD_PRELOAD/' bin/start-hugegraph-store.sh +sed -i 's/host: 127.0.0.1/host: 10.108.17.32/' conf/application.yml +sed -i 's/address: 127.0.0.1:8510/address: 10.108.17.32:8510/' conf/application.yml +bin/start-hugegraph-store.sh +popd +sleep 5 +echo "start store end"