From 62baf2a4aba4e30eac51056f2f69f0c6ff8dc0d1 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 15 Nov 2025 00:54:35 +0800 Subject: [PATCH 01/19] chore: update release workflow and .gitignore entries Bumped default release version to 1.7.0 and updated default GPG user in the release workflow. Added installation step for subversion on Ubuntu. Appended WARP.md to .gitignore. --- .github/workflows/validate-release.yml | 7 +++++-- .gitignore | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/validate-release.yml b/.github/workflows/validate-release.yml index e25a44043..747f0d78e 100644 --- a/.github/workflows/validate-release.yml +++ b/.github/workflows/validate-release.yml @@ -6,11 +6,11 @@ on: release_version: required: true description: svn release version - default: '1.5.0' + default: '1.7.0' gpg_user: required: true description: current release manager (gpg username) - default: 'vgalaxies' + default: 'Junzhi Peng' push: branches: @@ -67,6 +67,9 @@ jobs: if [[ ${{ matrix.os }} =~ "macos" ]]; then brew install svn fi + if [[ ${{ matrix.os }} =~ "ubuntu" ]]; then + sudo apt-get install -y subversion + fi rm -rf dist/${{ inputs.release_version }} svn co ${URL_PREFIX}/${{ inputs.release_version }} dist/${{ inputs.release_version }} diff --git a/.gitignore b/.gitignore index 39893e594..86f2983b1 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,4 @@ GEMINI.md .vscode/settings.json .aider* .gemini/ +WARP.md From 5bb8ee05c15427dd4ca64f93e52885b5c6763cb4 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 15 Nov 2025 01:35:20 +0800 Subject: [PATCH 02/19] refactor: unify release validation script and add local path support Deleted validate-release-in-local.sh and enhanced validate-release.sh to support both SVN and local directory validation. Added color-coded output, improved argument handling, and included Java version checks for better usability and error reporting. 
--- dist/validate-release-in-local.sh | 356 ------------------------------ dist/validate-release.sh | 161 +++++++++----- 2 files changed, 101 insertions(+), 416 deletions(-) delete mode 100755 dist/validate-release-in-local.sh diff --git a/dist/validate-release-in-local.sh b/dist/validate-release-in-local.sh deleted file mode 100755 index e935be693..000000000 --- a/dist/validate-release-in-local.sh +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This script is used to validate the release package, including: -# 1. Check the release package name & content -# 2. Check the release package sha512 & gpg signature -# 3. Compile the source package & run server & toolchain -# 4. 
Run server & toolchain in binary package - -# exit when any error occurs -set -e - -# release version (input by committer) -RELEASE_VERSION=$1 # like 1.2.0 -JAVA_VERSION=$2 # like 11 -USER=$3 -LOCAL_DIST_PATH=$4 # local directory path containing release files - -# this URL is only valid during the release process -SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/incubator/hugegraph" - -# git release branch (check it carefully) -#GIT_BRANCH="release-${RELEASE_VERSION}" - -RELEASE_VERSION=${RELEASE_VERSION:?"Please input the release version, like 1.2.0"} -USER=${USER:-"imbajin"} -WORK_DIR=$( - cd "$(dirname "$0")" - pwd -) - -# Use local directory if provided, otherwise use default dist path -if [[ -n "${LOCAL_DIST_PATH}" ]]; then - DIST_DIR="${LOCAL_DIST_PATH}" - echo "Using local directory: ${DIST_DIR}" -else - DIST_DIR="${WORK_DIR}/dist/${RELEASE_VERSION}" - echo "Using default directory: ${DIST_DIR}" -fi - -# Validate local directory exists -if [[ ! -d "${DIST_DIR}" ]]; then - echo "Error: Directory ${DIST_DIR} does not exist" - exit 1 -fi - -cd "${WORK_DIR}" -echo "Current work dir: $(pwd)" -echo "Release files directory: ${DIST_DIR}" - -################################ -# Step 1: Validate Local Directory # -################################ -cd "${DIST_DIR}" -echo "Contents of ${DIST_DIR}:" -ls -lh - -################################################## -# Step 2: Check Environment & Import Public Keys # -################################################## -shasum --version 1>/dev/null -gpg --version 1>/dev/null - -wget https://downloads.apache.org/incubator/hugegraph/KEYS -echo "Import KEYS:" && gpg --import KEYS -# TODO: how to trust all public keys in gpg list, currently only trust the first one -echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key $USER trust - -echo "trust all pk" -for key in $(gpg --no-tty --list-keys --with-colons | awk -F: '/^pub/ {print $5}'); do - echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$key" trust -done - 
-######################################## -# Step 3: Check SHA512 & GPG Signature # -######################################## -cd "${DIST_DIR}" - -for i in *.tar.gz; do - echo "$i" - shasum -a 512 --check "$i".sha512 - eval gpg "${GPG_OPT}" --verify "$i".asc "$i" -done - -#################################### -# Step 4: Validate Source Packages # -#################################### -cd "${DIST_DIR}" - -CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org" -CATEGORY_B="\bCDDL1|\bCPL|\bEPL|\bIPL|\bMPL|\bSPL|OSL-3.0|UnRAR License|Erlang Public License|\bOFL\b|Ubuntu Font License Version 1.0|IPA Font License Agreement v1.0|EPL2.0|CC-BY" -ls -lh ./*.tar.gz -for i in *src.tar.gz; do - echo "$i" - - # 4.1: check the directory name include "incubating" - if [[ ! "$i" =~ "incubating" ]]; then - echo "The package name $i should include incubating" && exit 1 - fi - - MODULE_DIR=$(basename "$i" .tar.gz) - rm -rf ${MODULE_DIR} - tar -xzvf "$i" - pushd ${MODULE_DIR} - echo "Start to check the package content: ${MODULE_DIR}" - - # 4.2: check the directory include "NOTICE" and "LICENSE" file and "DISCLAIMER" file - if [[ ! -f "LICENSE" ]]; then - echo "The package $i should include LICENSE file" && exit 1 - fi - if [[ ! -f "NOTICE" ]]; then - echo "The package $i should include NOTICE file" && exit 1 - fi - if [[ ! 
-f "DISCLAIMER" ]]; then - echo "The package $i should include DISCLAIMER file" && exit 1 - fi - - # 4.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE and NOTICE files - COUNT=$(grep -E "$CATEGORY_X" LICENSE NOTICE | wc -l) - if [[ $COUNT -ne 0 ]]; then - grep -E "$CATEGORY_X" LICENSE NOTICE - echo "The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT" && exit 1 - fi - - # 4.4: ensure doesn't contains ASF CATEGORY B License dependencies in LICENSE and NOTICE files - COUNT=$(grep -E "$CATEGORY_B" LICENSE NOTICE | wc -l) - if [[ $COUNT -ne 0 ]]; then - grep -E "$CATEGORY_B" LICENSE NOTICE - echo "The package $i shouldn't include invalid ASF category B dependencies, but get $COUNT" && exit 1 - fi - - # 4.5: ensure doesn't contains empty directory or file - find . -type d -empty | while read -r EMPTY_DIR; do - find . -type d -empty - echo "The package $i shouldn't include empty directory: $EMPTY_DIR is empty" && exit 1 - done - find . -type f -empty | while read -r EMPTY_FILE; do - find . -type f -empty - echo "The package $i shouldn't include empty file: $EMPTY_FILE is empty" && exit 1 - done - - # 4.6: ensure any file should less than 800kb - find . -type f -size +800k | while read -r FILE; do - find . -type f -size +800k - echo "The package $i shouldn't include file larger than 800kb: $FILE is larger than 800kb" && exit 1 - done - - # 4.7: ensure all binary files are documented in LICENSE - find . 
-type f | perl -lne 'print if -B' | while read -r BINARY_FILE; do - FILE_NAME=$(basename "$BINARY_FILE") - if grep -q "$FILE_NAME" LICENSE; then - echo "Binary file $BINARY_FILE is documented in LICENSE, please check manually" - else - echo "Error: Binary file $BINARY_FILE is not documented in LICENSE" && exit 1 - fi - done - - # 4.8: test compile the packages - if [[ ($JAVA_VERSION == 8 && "$i" =~ "hugegraph-computer") ]]; then - echo "Skip compile $i module in java8" - elif [[ "$i" =~ 'hugegraph-ai' ]]; then - echo "Skip compile $i module in all versions" - elif [[ "$i" =~ "hugegraph-commons" ]]; then - mvn install -DskipTests -Papache-release -ntp -e - elif [[ "$i" =~ "hugegraph-computer" ]]; then - cd computer - mvn install -DskipTests -Papache-release -ntp -e - else - # TODO: consider using commands that are entirely consistent with building binary packages - mvn package -DskipTests -Papache-release -ntp -e - ls -lh - fi - popd -done - -########################################### -# Step 5: Run Compiled Packages of Server # -########################################### -cd "${DIST_DIR}" - -ls -lh -pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*"${RELEASE_VERSION}" -bin/init-store.sh -sleep 3 -bin/start-hugegraph.sh -popd - -####################################################################### -# Step 6: Run Compiled Packages of ToolChain (Loader & Tool & Hubble) # -####################################################################### -cd "${DIST_DIR}" - -pushd ./*toolchain*src -ls -lh -pushd ./*toolchain*"${RELEASE_VERSION}" -ls -lh - -# 6.1: load some data first -echo "test loader" -pushd ./*loader*"${RELEASE_VERSION}" -bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy \ - -g hugegraph -popd - -# 6.2: try some gremlin query & api in tool -echo "test tool" -pushd ./*tool*"${RELEASE_VERSION}" -bin/hugegraph gremlin-execute --script 'g.V().count()' -bin/hugegraph task-list -bin/hugegraph backup -t all 
--directory ./backup-test -popd - -# 6.3: start hubble and connect to server -echo "test hubble" -pushd ./*hubble*"${RELEASE_VERSION}" -# TODO: add hubble doc & test it -cat conf/hugegraph-hubble.properties -bin/start-hubble.sh -bin/stop-hubble.sh -popd - -popd -popd -# stop server -pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*"${RELEASE_VERSION}" -bin/stop-hugegraph.sh -popd - -# clear source packages -#rm -rf ./*src* -#ls -lh - -#################################### -# Step 7: Validate Binary Packages # -#################################### -cd "${DIST_DIR}" - -for i in *.tar.gz; do - if [[ "$i" == *-src.tar.gz ]]; then - # skip source packages - continue - fi - - echo "$i" - - # 7.1: check the directory name include "incubating" - if [[ ! "$i" =~ "incubating" ]]; then - echo "The package name $i should include incubating" && exit 1 - fi - - MODULE_DIR=$(basename "$i" .tar.gz) - rm -rf ${MODULE_DIR} - tar -xzvf "$i" - pushd ${MODULE_DIR} - ls -lh - echo "Start to check the package content: ${MODULE_DIR}" - - # 7.2: check root dir include "NOTICE"/"LICENSE"/"DISCLAIMER" files & "licenses" dir - if [[ ! -f "LICENSE" ]]; then - echo "The package $i should include LICENSE file" && exit 1 - fi - if [[ ! -f "NOTICE" ]]; then - echo "The package $i should include NOTICE file" && exit 1 - fi - if [[ ! -f "DISCLAIMER" ]]; then - echo "The package $i should include DISCLAIMER file" && exit 1 - fi - if [[ ! -d "licenses" ]]; then - echo "The package $i should include licenses dir" && exit 1 - fi - - # 7.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE/NOTICE and licenses/* files - COUNT=$(grep -r -E "$CATEGORY_X" LICENSE NOTICE licenses | wc -l) - if [[ $COUNT -ne 0 ]]; then - grep -r -E "$CATEGORY_X" LICENSE NOTICE licenses - echo "The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT" && exit 1 - fi - - # 7.4: ensure doesn't contains empty directory or file - find . 
-type d -empty | while read -r EMPTY_DIR; do - find . -type d -empty - echo "The package $i shouldn't include empty directory: $EMPTY_DIR is empty" && exit 1 - done - find . -type f -empty | while read -r EMPTY_FILE; do - find . -type f -empty - echo "The package $i shouldn't include empty file: $EMPTY_FILE is empty" && exit 1 - done - - popd -done - -# TODO: skip the following steps by comparing the artifacts built from source packages with binary packages -######################################### -# Step 8: Run Binary Packages of Server # -######################################### -cd "${DIST_DIR}" - -# TODO: run pd & store -pushd ./*hugegraph-incubating*"${RELEASE_VERSION}"/*hugegraph-server-incubating*"${RELEASE_VERSION}" -bin/init-store.sh -sleep 3 -bin/start-hugegraph.sh -popd - -##################################################################### -# Step 9: Run Binary Packages of ToolChain (Loader & Tool & Hubble) # -##################################################################### -cd "${DIST_DIR}" - -pushd ./*toolchain*"${RELEASE_VERSION}" -ls -lh - -# 9.1: load some data first -echo "test loader" -pushd ./*loader*"${RELEASE_VERSION}" -bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph -popd - -# 9.2: try some gremlin query & api in tool -echo "test tool" -pushd ./*tool*"${RELEASE_VERSION}" -bin/hugegraph gremlin-execute --script 'g.V().count()' -bin/hugegraph task-list -bin/hugegraph backup -t all --directory ./backup-test -popd - -# 9.3: start hubble and connect to server -echo "test hubble" -pushd ./*hubble*"${RELEASE_VERSION}" -# TODO: add hubble doc & test it -cat conf/hugegraph-hubble.properties -bin/start-hubble.sh -bin/stop-hubble.sh -popd - -popd -# stop server -pushd ./*hugegraph-incubating*"${RELEASE_VERSION}"/*hugegraph-server-incubating*"${RELEASE_VERSION}" -bin/stop-hugegraph.sh -popd - -echo "Finish validate, please check all steps manually again!" 
diff --git a/dist/validate-release.sh b/dist/validate-release.sh index 8df1645f9..d9fdb186a 100755 --- a/dist/validate-release.sh +++ b/dist/validate-release.sh @@ -1,33 +1,36 @@ #!/usr/bin/env bash # -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# # This script is used to validate the release package, including: # 1. Check the release package name & content # 2. Check the release package sha512 & gpg signature # 3. Compile the source package & run server & toolchain # 4. Run server & toolchain in binary package +# +# Usage: +# 1. Validate from Apache SVN (default): +# ./validate-release.sh [local-path] [java-version] +# Example: ./validate-release.sh 1.7.0 pengjunzhi +# Example: ./validate-release.sh 1.7.0 pengjunzhi "" 11 +# +# 2. 
Validate from local directory: +# ./validate-release.sh [java-version] +# Example: ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist +# Example: ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist 11 # exit when any error occurs set -e +# Color definitions +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + # release version (input by committer) -RELEASE_VERSION=$1 # like 1.2.0 -JAVA_VERSION=$2 # like 11 -USER=$3 +RELEASE_VERSION=$1 # like 1.7.0 +USER=$2 +LOCAL_DIST_PATH=$3 # optional: local directory path containing release files +JAVA_VERSION=${4:-11} # optional: default to 11 # this URL is only valid during the release process SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/incubator/hugegraph" @@ -35,8 +38,8 @@ SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/incubator/hugegraph" # git release branch (check it carefully) #GIT_BRANCH="release-${RELEASE_VERSION}" -RELEASE_VERSION=${RELEASE_VERSION:?"Please input the release version, like 1.2.0"} -USER=${USER:-"imbajin"} +RELEASE_VERSION=${RELEASE_VERSION:?"Please input the release version, like 1.7.0"} +USER=${USER:?"Please input the user name"} WORK_DIR=$( cd "$(dirname "$0")" pwd @@ -45,13 +48,35 @@ WORK_DIR=$( cd "${WORK_DIR}" echo "Current work dir: $(pwd)" -################################ -# Step 1: Download SVN Sources # -################################ -rm -rf "${WORK_DIR}/dist/${RELEASE_VERSION}" -mkdir -p "${WORK_DIR}/dist/${RELEASE_VERSION}" -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" -svn co "${SVN_URL_PREFIX}/${RELEASE_VERSION}" . +#################################################### +# Step 1: Prepare Release Files (SVN or Local) # +#################################################### +if [[ -n "${LOCAL_DIST_PATH}" ]]; then + # Use local directory + DIST_DIR="${LOCAL_DIST_PATH}" + echo "Using local directory: ${DIST_DIR}" + + # Validate local directory exists + if [[ ! 
-d "${DIST_DIR}" ]]; then + echo -e "${RED}Error: Directory ${DIST_DIR} does not exist${NC}" + exit 1 + fi + + echo "Contents of ${DIST_DIR}:" + ls -lh "${DIST_DIR}" +else + # Download from SVN + DIST_DIR="${WORK_DIR}/dist/${RELEASE_VERSION}" + echo "Downloading from SVN to: ${DIST_DIR}" + + rm -rf "${DIST_DIR}" + mkdir -p "${DIST_DIR}" + cd "${DIST_DIR}" + svn co "${SVN_URL_PREFIX}/${RELEASE_VERSION}" . +fi + +cd "${DIST_DIR}" +echo "Release files directory: ${DIST_DIR}" ################################################## # Step 2: Check Environment & Import Public Keys # @@ -59,6 +84,26 @@ svn co "${SVN_URL_PREFIX}/${RELEASE_VERSION}" . shasum --version 1>/dev/null gpg --version 1>/dev/null +# Check Java version +echo "Checking Java version..." +if ! command -v java &> /dev/null; then + echo -e "${RED}Error: Java is not installed or not in PATH${NC}" + exit 1 +fi + +CURRENT_JAVA_VERSION=$(java -version 2>&1 | head -n 1 | awk -F '"' '{print $2}' | awk -F '.' '{print $1}') +echo "Current Java version: $CURRENT_JAVA_VERSION (Required: ${JAVA_VERSION})" + +if [[ "$CURRENT_JAVA_VERSION" != "$JAVA_VERSION" ]]; then + echo -e "${RED}Error: Java version mismatch!${NC}" + echo -e "${RED} Current: Java $CURRENT_JAVA_VERSION${NC}" + echo -e "${RED} Required: Java ${JAVA_VERSION}${NC}" + echo -e "${RED} Please switch to Java ${JAVA_VERSION} before running this script${NC}" + exit 1 +fi + +echo -e "${GREEN}Java version check passed: Java $CURRENT_JAVA_VERSION${NC}" + wget https://downloads.apache.org/incubator/hugegraph/KEYS echo "Import KEYS:" && gpg --import KEYS # TODO: how to trust all public keys in gpg list, currently only trust the first one @@ -72,7 +117,7 @@ done ######################################## # Step 3: Check SHA512 & GPG Signature # ######################################## -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" for i in *.tar.gz; do echo "$i" @@ -83,7 +128,7 @@ done #################################### # Step 4: Validate Source Packages 
# #################################### -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org" CATEGORY_B="\bCDDL1|\bCPL|\bEPL|\bIPL|\bMPL|\bSPL|OSL-3.0|UnRAR License|Erlang Public License|\bOFL\b|Ubuntu Font License Version 1.0|IPA Font License Agreement v1.0|EPL2.0|CC-BY" @@ -93,7 +138,7 @@ for i in *src.tar.gz; do # 4.1: check the directory name include "incubating" if [[ ! "$i" =~ "incubating" ]]; then - echo "The package name $i should include incubating" && exit 1 + echo -e "${RED}The package name $i should include incubating${NC}" && exit 1 fi MODULE_DIR=$(basename "$i" .tar.gz) @@ -104,65 +149,61 @@ for i in *src.tar.gz; do # 4.2: check the directory include "NOTICE" and "LICENSE" file and "DISCLAIMER" file if [[ ! -f "LICENSE" ]]; then - echo "The package $i should include LICENSE file" && exit 1 + echo -e "${RED}The package $i should include LICENSE file${NC}" && exit 1 fi if [[ ! -f "NOTICE" ]]; then - echo "The package $i should include NOTICE file" && exit 1 + echo -e "${RED}The package $i should include NOTICE file${NC}" && exit 1 fi if [[ ! 
-f "DISCLAIMER" ]]; then - echo "The package $i should include DISCLAIMER file" && exit 1 + echo -e "${RED}The package $i should include DISCLAIMER file${NC}" && exit 1 fi # 4.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE and NOTICE files COUNT=$(grep -E "$CATEGORY_X" LICENSE NOTICE | wc -l) if [[ $COUNT -ne 0 ]]; then grep -E "$CATEGORY_X" LICENSE NOTICE - echo "The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT" && exit 1 + echo -e "${RED}The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT${NC}" && exit 1 fi # 4.4: ensure doesn't contains ASF CATEGORY B License dependencies in LICENSE and NOTICE files COUNT=$(grep -E "$CATEGORY_B" LICENSE NOTICE | wc -l) if [[ $COUNT -ne 0 ]]; then grep -E "$CATEGORY_B" LICENSE NOTICE - echo "The package $i shouldn't include invalid ASF category B dependencies, but get $COUNT" && exit 1 + echo -e "${RED}The package $i shouldn't include invalid ASF category B dependencies, but get $COUNT${NC}" && exit 1 fi # 4.5: ensure doesn't contains empty directory or file find . -type d -empty | while read -r EMPTY_DIR; do find . -type d -empty - echo "The package $i shouldn't include empty directory: $EMPTY_DIR is empty" && exit 1 + echo -e "${RED}The package $i shouldn't include empty directory: $EMPTY_DIR is empty${NC}" && exit 1 done find . -type f -empty | while read -r EMPTY_FILE; do find . -type f -empty - echo "The package $i shouldn't include empty file: $EMPTY_FILE is empty" && exit 1 + echo -e "${RED}The package $i shouldn't include empty file: $EMPTY_FILE is empty${NC}" && exit 1 done # 4.6: ensure any file should less than 800kb find . -type f -size +800k | while read -r FILE; do find . 
-type f -size +800k - echo "The package $i shouldn't include file larger than 800kb: $FILE is larger than 800kb" && exit 1 + echo -e "${RED}The package $i shouldn't include file larger than 800kb: $FILE is larger than 800kb${NC}" && exit 1 done # 4.7: ensure all binary files are documented in LICENSE find . -type f | perl -lne 'print if -B' | while read -r BINARY_FILE; do FILE_NAME=$(basename "$BINARY_FILE") if grep -q "$FILE_NAME" LICENSE; then - echo "Binary file $BINARY_FILE is documented in LICENSE, please check manually" + echo -e "${YELLOW}Binary file $BINARY_FILE is documented in LICENSE, please check manually${NC}" else - echo "Error: Binary file $BINARY_FILE is not documented in LICENSE" && exit 1 + echo -e "${RED}Error: Binary file $BINARY_FILE is not documented in LICENSE${NC}" && exit 1 fi done # 4.8: test compile the packages - if [[ ($JAVA_VERSION == 8 && "$i" =~ "hugegraph-computer") ]]; then - echo "Skip compile $i module in java8" - elif [[ "$i" =~ 'hugegraph-ai' ]]; then + if [[ "$i" =~ 'hugegraph-ai' ]]; then echo "Skip compile $i module in all versions" - elif [[ "$i" =~ "hugegraph-commons" ]]; then - mvn install -DskipTests -Papache-release -ntp -e elif [[ "$i" =~ "hugegraph-computer" ]]; then cd computer - mvn install -DskipTests -Papache-release -ntp -e + mvn package -DskipTests -Papache-release -ntp -e else # TODO: consider using commands that are entirely consistent with building binary packages mvn package -DskipTests -Papache-release -ntp -e @@ -174,7 +215,7 @@ done ########################################### # Step 5: Run Compiled Packages of Server # ########################################### -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" ls -lh pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*"${RELEASE_VERSION}" @@ -186,7 +227,7 @@ popd ####################################################################### # Step 6: Run Compiled Packages of ToolChain (Loader & Tool & Hubble) # 
####################################################################### -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" pushd ./*toolchain*src ls -lh @@ -231,7 +272,7 @@ popd #################################### # Step 7: Validate Binary Packages # #################################### -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" for i in *.tar.gz; do if [[ "$i" == *-src.tar.gz ]]; then @@ -243,7 +284,7 @@ for i in *.tar.gz; do # 7.1: check the directory name include "incubating" if [[ ! "$i" =~ "incubating" ]]; then - echo "The package name $i should include incubating" && exit 1 + echo -e "${RED}The package name $i should include incubating${NC}" && exit 1 fi MODULE_DIR=$(basename "$i" .tar.gz) @@ -255,33 +296,33 @@ for i in *.tar.gz; do # 7.2: check root dir include "NOTICE"/"LICENSE"/"DISCLAIMER" files & "licenses" dir if [[ ! -f "LICENSE" ]]; then - echo "The package $i should include LICENSE file" && exit 1 + echo -e "${RED}The package $i should include LICENSE file${NC}" && exit 1 fi if [[ ! -f "NOTICE" ]]; then - echo "The package $i should include NOTICE file" && exit 1 + echo -e "${RED}The package $i should include NOTICE file${NC}" && exit 1 fi if [[ ! -f "DISCLAIMER" ]]; then - echo "The package $i should include DISCLAIMER file" && exit 1 + echo -e "${RED}The package $i should include DISCLAIMER file${NC}" && exit 1 fi if [[ ! 
-d "licenses" ]]; then - echo "The package $i should include licenses dir" && exit 1 + echo -e "${RED}The package $i should include licenses dir${NC}" && exit 1 fi # 7.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE/NOTICE and licenses/* files COUNT=$(grep -r -E "$CATEGORY_X" LICENSE NOTICE licenses | wc -l) if [[ $COUNT -ne 0 ]]; then grep -r -E "$CATEGORY_X" LICENSE NOTICE licenses - echo "The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT" && exit 1 + echo -e "${RED}The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT${NC}" && exit 1 fi # 7.4: ensure doesn't contains empty directory or file find . -type d -empty | while read -r EMPTY_DIR; do find . -type d -empty - echo "The package $i shouldn't include empty directory: $EMPTY_DIR is empty" && exit 1 + echo -e "${RED}The package $i shouldn't include empty directory: $EMPTY_DIR is empty${NC}" && exit 1 done find . -type f -empty | while read -r EMPTY_FILE; do find . 
-type f -empty - echo "The package $i shouldn't include empty file: $EMPTY_FILE is empty" && exit 1 + echo -e "${RED}The package $i shouldn't include empty file: $EMPTY_FILE is empty${NC}" && exit 1 done popd @@ -291,7 +332,7 @@ done ######################################### # Step 8: Run Binary Packages of Server # ######################################### -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" # TODO: run pd & store pushd ./*hugegraph-incubating*"${RELEASE_VERSION}"/*hugegraph-server-incubating*"${RELEASE_VERSION}" @@ -303,7 +344,7 @@ popd ##################################################################### # Step 9: Run Binary Packages of ToolChain (Loader & Tool & Hubble) # ##################################################################### -cd "${WORK_DIR}/dist/${RELEASE_VERSION}" +cd "${DIST_DIR}" pushd ./*toolchain*"${RELEASE_VERSION}" ls -lh @@ -337,4 +378,4 @@ pushd ./*hugegraph-incubating*"${RELEASE_VERSION}"/*hugegraph-server-incubating* bin/stop-hugegraph.sh popd -echo "Finish validate, please check all steps manually again!" +echo -e "${GREEN}Finish validate, please check all steps manually again!${NC}" From e4e9e087f4c6e94bc66206b0ee8d3fc5f490b428 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 15 Nov 2025 02:28:05 +0800 Subject: [PATCH 03/19] refactor: revamp release validation script with enhanced checks V2 Major rewrite of validate-release.sh for Apache HugeGraph, adding modular structure, improved logging, error/warning collection, colorized output, and comprehensive validation steps for source and binary packages. New features include dependency checks, GPG key management, license compliance, file size and binary checks, version consistency, and automated server/toolchain testing. Usage instructions and help output are expanded for clarity. 
--- dist/validate-release.sh | 1548 +++++++++++++++++++++++++++++--------- 1 file changed, 1173 insertions(+), 375 deletions(-) diff --git a/dist/validate-release.sh b/dist/validate-release.sh index d9fdb186a..06dcdbbd9 100755 --- a/dist/validate-release.sh +++ b/dist/validate-release.sh @@ -1,381 +1,1179 @@ #!/usr/bin/env bash # -# This script is used to validate the release package, including: -# 1. Check the release package name & content -# 2. Check the release package sha512 & gpg signature -# 3. Compile the source package & run server & toolchain -# 4. Run server & toolchain in binary package +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +################################################################################ +# Apache HugeGraph Release Validation Script +################################################################################ +# +# This script validates Apache HugeGraph (Incubating) release packages: +# 1. Check package integrity (SHA512, GPG signatures) +# 2. Validate package names and required files +# 3. Check license compliance (ASF categories) +# 4. Validate package contents +# 5. Compile source packages +# 6. Run server and toolchain tests # # Usage: -# 1. 
Validate from Apache SVN (default): -# ./validate-release.sh [local-path] [java-version] -# Example: ./validate-release.sh 1.7.0 pengjunzhi -# Example: ./validate-release.sh 1.7.0 pengjunzhi "" 11 +# validate-release.sh [local-path] [java-version] +# validate-release.sh --help +# +# Arguments: +# version Release version (e.g., 1.7.0) +# user Apache username for GPG key trust +# local-path (Optional) Local directory containing release files +# If omitted, downloads from Apache SVN +# java-version (Optional) Java version to validate (default: 11) # -# 2. Validate from local directory: -# ./validate-release.sh [java-version] -# Example: ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist -# Example: ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist 11 - -# exit when any error occurs -set -e - -# Color definitions -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -NC='\033[0m' # No Color - -# release version (input by committer) -RELEASE_VERSION=$1 # like 1.7.0 -USER=$2 -LOCAL_DIST_PATH=$3 # optional: local directory path containing release files -JAVA_VERSION=${4:-11} # optional: default to 11 - -# this URL is only valid during the release process -SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/incubator/hugegraph" - -# git release branch (check it carefully) -#GIT_BRANCH="release-${RELEASE_VERSION}" - -RELEASE_VERSION=${RELEASE_VERSION:?"Please input the release version, like 1.7.0"} -USER=${USER:?"Please input the user name"} -WORK_DIR=$( - cd "$(dirname "$0")" - pwd -) - -cd "${WORK_DIR}" -echo "Current work dir: $(pwd)" - -#################################################### -# Step 1: Prepare Release Files (SVN or Local) # -#################################################### -if [[ -n "${LOCAL_DIST_PATH}" ]]; then - # Use local directory - DIST_DIR="${LOCAL_DIST_PATH}" - echo "Using local directory: ${DIST_DIR}" - - # Validate local directory exists - if [[ ! 
-d "${DIST_DIR}" ]]; then - echo -e "${RED}Error: Directory ${DIST_DIR} does not exist${NC}" - exit 1 - fi - - echo "Contents of ${DIST_DIR}:" - ls -lh "${DIST_DIR}" -else - # Download from SVN - DIST_DIR="${WORK_DIR}/dist/${RELEASE_VERSION}" - echo "Downloading from SVN to: ${DIST_DIR}" - - rm -rf "${DIST_DIR}" - mkdir -p "${DIST_DIR}" - cd "${DIST_DIR}" - svn co "${SVN_URL_PREFIX}/${RELEASE_VERSION}" . -fi - -cd "${DIST_DIR}" -echo "Release files directory: ${DIST_DIR}" - -################################################## -# Step 2: Check Environment & Import Public Keys # -################################################## -shasum --version 1>/dev/null -gpg --version 1>/dev/null - -# Check Java version -echo "Checking Java version..." -if ! command -v java &> /dev/null; then - echo -e "${RED}Error: Java is not installed or not in PATH${NC}" - exit 1 -fi - -CURRENT_JAVA_VERSION=$(java -version 2>&1 | head -n 1 | awk -F '"' '{print $2}' | awk -F '.' '{print $1}') -echo "Current Java version: $CURRENT_JAVA_VERSION (Required: ${JAVA_VERSION})" - -if [[ "$CURRENT_JAVA_VERSION" != "$JAVA_VERSION" ]]; then - echo -e "${RED}Error: Java version mismatch!${NC}" - echo -e "${RED} Current: Java $CURRENT_JAVA_VERSION${NC}" - echo -e "${RED} Required: Java ${JAVA_VERSION}${NC}" - echo -e "${RED} Please switch to Java ${JAVA_VERSION} before running this script${NC}" - exit 1 -fi - -echo -e "${GREEN}Java version check passed: Java $CURRENT_JAVA_VERSION${NC}" - -wget https://downloads.apache.org/incubator/hugegraph/KEYS -echo "Import KEYS:" && gpg --import KEYS -# TODO: how to trust all public keys in gpg list, currently only trust the first one -echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key $USER trust - -echo "trust all pk" -for key in $(gpg --no-tty --list-keys --with-colons | awk -F: '/^pub/ {print $5}'); do - echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$key" trust -done - -######################################## -# Step 3: Check SHA512 & GPG 
Signature # -######################################## -cd "${DIST_DIR}" - -for i in *.tar.gz; do - echo "$i" - shasum -a 512 --check "$i".sha512 - eval gpg "${GPG_OPT}" --verify "$i".asc "$i" -done - -#################################### -# Step 4: Validate Source Packages # -#################################### -cd "${DIST_DIR}" - -CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org" -CATEGORY_B="\bCDDL1|\bCPL|\bEPL|\bIPL|\bMPL|\bSPL|OSL-3.0|UnRAR License|Erlang Public License|\bOFL\b|Ubuntu Font License Version 1.0|IPA Font License Agreement v1.0|EPL2.0|CC-BY" -ls -lh ./*.tar.gz -for i in *src.tar.gz; do - echo "$i" - - # 4.1: check the directory name include "incubating" - if [[ ! "$i" =~ "incubating" ]]; then - echo -e "${RED}The package name $i should include incubating${NC}" && exit 1 - fi - - MODULE_DIR=$(basename "$i" .tar.gz) - rm -rf ${MODULE_DIR} - tar -xzvf "$i" - pushd ${MODULE_DIR} - echo "Start to check the package content: ${MODULE_DIR}" - - # 4.2: check the directory include "NOTICE" and "LICENSE" file and "DISCLAIMER" file - if [[ ! -f "LICENSE" ]]; then - echo -e "${RED}The package $i should include LICENSE file${NC}" && exit 1 - fi - if [[ ! -f "NOTICE" ]]; then - echo -e "${RED}The package $i should include NOTICE file${NC}" && exit 1 - fi - if [[ ! 
-f "DISCLAIMER" ]]; then - echo -e "${RED}The package $i should include DISCLAIMER file${NC}" && exit 1 - fi - - # 4.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE and NOTICE files - COUNT=$(grep -E "$CATEGORY_X" LICENSE NOTICE | wc -l) - if [[ $COUNT -ne 0 ]]; then - grep -E "$CATEGORY_X" LICENSE NOTICE - echo -e "${RED}The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT${NC}" && exit 1 - fi - - # 4.4: ensure doesn't contains ASF CATEGORY B License dependencies in LICENSE and NOTICE files - COUNT=$(grep -E "$CATEGORY_B" LICENSE NOTICE | wc -l) - if [[ $COUNT -ne 0 ]]; then - grep -E "$CATEGORY_B" LICENSE NOTICE - echo -e "${RED}The package $i shouldn't include invalid ASF category B dependencies, but get $COUNT${NC}" && exit 1 - fi - - # 4.5: ensure doesn't contains empty directory or file - find . -type d -empty | while read -r EMPTY_DIR; do - find . -type d -empty - echo -e "${RED}The package $i shouldn't include empty directory: $EMPTY_DIR is empty${NC}" && exit 1 - done - find . -type f -empty | while read -r EMPTY_FILE; do - find . -type f -empty - echo -e "${RED}The package $i shouldn't include empty file: $EMPTY_FILE is empty${NC}" && exit 1 - done - - # 4.6: ensure any file should less than 800kb - find . -type f -size +800k | while read -r FILE; do - find . -type f -size +800k - echo -e "${RED}The package $i shouldn't include file larger than 800kb: $FILE is larger than 800kb${NC}" && exit 1 - done - - # 4.7: ensure all binary files are documented in LICENSE - find . 
-type f | perl -lne 'print if -B' | while read -r BINARY_FILE; do - FILE_NAME=$(basename "$BINARY_FILE") - if grep -q "$FILE_NAME" LICENSE; then - echo -e "${YELLOW}Binary file $BINARY_FILE is documented in LICENSE, please check manually${NC}" +# Examples: +# # Validate from Apache SVN +# ./validate-release.sh 1.7.0 pengjunzhi +# +# # Validate from local directory +# ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist +# +# # Specify Java version +# ./validate-release.sh 1.7.0 pengjunzhi "" 11 +# ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist 11 +# +################################################################################ + +# Strict mode - but don't exit on error yet (we collect all errors) +set -o pipefail +set -o nounset + +################################################################################ +# Configuration Constants +################################################################################ + +readonly SCRIPT_VERSION="2.0.0" +readonly SCRIPT_NAME=$(basename "$0") + +# URLs +readonly SVN_URL_PREFIX="https://dist.apache.org/repos/dist/dev/incubator/hugegraph" +readonly KEYS_URL="https://downloads.apache.org/incubator/hugegraph/KEYS" + +# Validation Rules +readonly MAX_FILE_SIZE="800k" +readonly SERVER_START_DELAY=3 +readonly SERVICE_HEALTH_TIMEOUT=30 + +# License Patterns (ASF Category X - Prohibited) +readonly CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org" + +# License Patterns (ASF Category B - Must be documented) +readonly CATEGORY_B="\bCDDL1|\bCPL|\bEPL|\bIPL|\bMPL|\bSPL|OSL-3.0|UnRAR License|Erlang Public License|\bOFL\b|Ubuntu Font License Version 1.0|IPA Font License Agreement v1.0|EPL2.0|CC-BY" + +# Color Definitions +readonly RED='\033[0;31m' +readonly GREEN='\033[0;32m' +readonly YELLOW='\033[0;33m' +readonly BLUE='\033[0;34m' +readonly NC='\033[0m' # No Color + 
+################################################################################ +# Global Variables +################################################################################ + +# Script state +WORK_DIR="" +LOG_FILE="" +DIST_DIR="" +RELEASE_VERSION="" +USER="" +LOCAL_DIST_PATH="" +JAVA_VERSION=11 +NON_INTERACTIVE=0 + +# Error tracking +declare -a VALIDATION_ERRORS=() +declare -a VALIDATION_WARNINGS=() +TOTAL_CHECKS=0 +PASSED_CHECKS=0 +FAILED_CHECKS=0 + +# Service tracking for cleanup +SERVER_STARTED=0 +HUBBLE_STARTED=0 + +################################################################################ +# Helper Functions - Output & Logging +################################################################################ + +show_usage() { + cat << EOF +Apache HugeGraph Release Validation Script v${SCRIPT_VERSION} + +Usage: ${SCRIPT_NAME} <version> <user> [local-path] [java-version] + ${SCRIPT_NAME} --help | -h + ${SCRIPT_NAME} --version | -v + +Validates Apache HugeGraph release packages including: + - Package integrity (SHA512, GPG signatures) + - License compliance (ASF categories) + - Package contents and structure + - Compilation and runtime testing + +Arguments: + version Release version (e.g., 1.7.0) + user Apache username for GPG key trust + local-path (Optional) Local directory path containing release files + If omitted, downloads from Apache SVN + java-version (Optional) Java version to validate (default: 11) + +Options: + --help, -h Show this help message + --version, -v Show script version + --non-interactive Run without prompts (for CI/CD) + +Examples: + # Validate from Apache SVN (downloads files) + ${SCRIPT_NAME} 1.7.0 pengjunzhi + + # Validate from local directory + ${SCRIPT_NAME} 1.7.0 pengjunzhi /path/to/dist + + # Specify Java version + ${SCRIPT_NAME} 1.7.0 pengjunzhi "" 11 + ${SCRIPT_NAME} 1.7.0 pengjunzhi /path/to/dist 11 + + # Non-interactive mode for CI + ${SCRIPT_NAME} --non-interactive 1.7.0 pengjunzhi + +For more information, visit: + 
https://hugegraph.apache.org/docs/contribution-guidelines/validate-release/ + +EOF +} + +log() { + local level=$1 + shift + local message="$*" + local timestamp=$(date '+%Y-%m-%d %H:%M:%S') + echo "[${timestamp}] [${level}] ${message}" | tee -a "${LOG_FILE:-/dev/null}" +} + +info() { + echo -e "$*" + log "INFO" "$*" +} + +success() { + echo -e "${GREEN}✓ $*${NC}" + log "SUCCESS" "$*" +} + +warn() { + echo -e "${YELLOW}⚠ $*${NC}" >&2 + log "WARN" "$*" +} + +error() { + echo -e "${RED}✗ $*${NC}" >&2 + log "ERROR" "$*" +} + +print_step() { + local step=$1 + local total=$2 + local description=$3 + echo "" + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${BLUE}Step [$step/$total]: $description${NC}" + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + log "STEP" "[$step/$total] $description" +} + +print_progress() { + local current=$1 + local total=$2 + local item=$3 + echo -e " [${current}/${total}] ${item}" +} + +collect_error() { + local error_msg="$1" + VALIDATION_ERRORS+=("$error_msg") + FAILED_CHECKS=$((FAILED_CHECKS + 1)) + error "$error_msg" +} + +collect_warning() { + local warning_msg="$1" + VALIDATION_WARNINGS+=("$warning_msg") + warn "$warning_msg" +} + +mark_check_passed() { + PASSED_CHECKS=$((PASSED_CHECKS + 1)) + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) +} + +mark_check_failed() { + FAILED_CHECKS=$((FAILED_CHECKS + 1)) + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) +} + +################################################################################ +# Helper Functions - System & Environment +################################################################################ + +setup_logging() { + local log_dir="${WORK_DIR}/logs" + mkdir -p "$log_dir" + LOG_FILE="$log_dir/validate-${RELEASE_VERSION}-$(date +%Y%m%d-%H%M%S).log" + + info "Logging to: ${LOG_FILE}" + log "INIT" "Starting validation for HugeGraph ${RELEASE_VERSION}" + log "INIT" "User: ${USER}, Java: ${JAVA_VERSION}" +} + 
+check_dependencies() { + local missing_deps=() + local required_commands=("svn" "gpg" "shasum" "mvn" "java" "wget" "tar" "curl" "awk" "grep" "find" "perl") + + info "Checking required dependencies..." + + for cmd in "${required_commands[@]}"; do + if ! command -v "$cmd" &> /dev/null; then + missing_deps+=("$cmd") + error "Missing: $cmd" + else + local version_info + case "$cmd" in + java) + version_info=$(java -version 2>&1 | head -n1 | cut -d'"' -f2) + ;; + mvn) + version_info=$(mvn --version 2>&1 | head -n1 | awk '{print $3}') + ;; + *) + version_info=$($cmd --version 2>&1 | head -n1 || echo "installed") + ;; + esac + success "$cmd: $version_info" + fi + done + + if [[ ${#missing_deps[@]} -gt 0 ]]; then + error "Missing required dependencies: ${missing_deps[*]}" + echo "" + echo "Please install missing dependencies:" + echo " Ubuntu/Debian: sudo apt-get install ${missing_deps[*]}" + echo " macOS: brew install ${missing_deps[*]}" + exit 1 + fi + + success "All dependencies are installed" +} + +check_java_version() { + local required_version=$1 + + info "Checking Java version..." + + if ! command -v java &> /dev/null; then + collect_error "Java is not installed or not in PATH" + return 1 + fi + + local current_version=$(java -version 2>&1 | head -n 1 | awk -F '"' '{print $2}' | awk -F '.' '{print $1}') + info "Current Java version: $current_version (Required: ${required_version})" + + if [[ "$current_version" != "$required_version" ]]; then + collect_error "Java version mismatch! 
Current: Java $current_version, Required: Java ${required_version}" + collect_error "Please switch to Java ${required_version} before running this script" + return 1 + fi + + success "Java version check passed: Java $current_version" + mark_check_passed + return 0 +} + +find_package_dir() { + local pattern=$1 + local base_dir=${2:-"${DIST_DIR}"} + + local found=$(find "$base_dir" -maxdepth 3 -type d -path "$pattern" 2>/dev/null | head -n1) + + if [[ -z "$found" ]]; then + collect_error "Could not find directory matching pattern: $pattern" + return 1 + fi + + echo "$found" +} + +################################################################################ +# Helper Functions - GPG & Signatures +################################################################################ + +import_and_trust_gpg_keys() { + local user=$1 + + info "Downloading KEYS file from ${KEYS_URL}..." + if ! wget -q "${KEYS_URL}" -O KEYS; then + collect_error "Failed to download KEYS file from ${KEYS_URL}" + return 1 + fi + success "KEYS file downloaded" + + info "Importing GPG keys..." + local import_output=$(gpg --import KEYS 2>&1) + local imported_count=$(echo "$import_output" | grep -c "imported" || echo "0") + + if [[ "$imported_count" == "0" ]]; then + warn "No new keys imported (may already exist in keyring)" + else + success "Imported GPG keys" + fi + + # Trust specific user key + if ! gpg --list-keys "$user" &>/dev/null; then + collect_error "User '$user' key not found in imported keys. Please verify the username." + return 1 + fi + + info "Trusting GPG key for user: $user" + echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$user" trust 2>/dev/null + success "Trusted key for $user" + + # Trust all imported keys + info "Trusting all imported public keys..." 
+ local trusted=0 + for key in $(gpg --no-tty --list-keys --with-colons | awk -F: '/^pub/ {print $5}'); do + echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$key" trust 2>/dev/null + trusted=$((trusted + 1)) + done + success "Trusted $trusted GPG keys" + + mark_check_passed + return 0 +} + +################################################################################ +# Validation Functions - Package Checks +################################################################################ + +check_incubating_name() { + local package=$1 + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + if [[ ! "$package" =~ "incubating" ]]; then + collect_error "Package name '$package' should include 'incubating'" + return 1 + fi + + mark_check_passed + return 0 +} + +check_required_files() { + local package=$1 + local require_disclaimer=${2:-true} + local has_error=0 + + if [[ ! -f "LICENSE" ]]; then + collect_error "Package '$package' missing LICENSE file" + has_error=1 + else + mark_check_passed + fi + + if [[ ! -f "NOTICE" ]]; then + collect_error "Package '$package' missing NOTICE file" + has_error=1 + else + mark_check_passed + fi + + if [[ "$require_disclaimer" == "true" ]] && [[ ! 
-f "DISCLAIMER" ]]; then + collect_error "Package '$package' missing DISCLAIMER file" + has_error=1 + else + mark_check_passed + fi + + return $has_error +} + +check_license_categories() { + local package=$1 + local files=$2 + local has_error=0 + + # Check Category X (Prohibited) + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + local cat_x_count=$(grep -r -E "$CATEGORY_X" $files 2>/dev/null | wc -l | tr -d ' ') + if [[ $cat_x_count -ne 0 ]]; then + collect_error "Package '$package' contains $cat_x_count prohibited ASF Category X license(s)" + grep -r -E "$CATEGORY_X" $files + has_error=1 + else + mark_check_passed + fi + + # Check Category B (Must be documented - warning only) + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + local cat_b_count=$(grep -r -E "$CATEGORY_B" $files 2>/dev/null | wc -l | tr -d ' ') + if [[ $cat_b_count -ne 0 ]]; then + collect_warning "Package '$package' contains $cat_b_count ASF Category B license(s) - please verify documentation" + grep -r -E "$CATEGORY_B" $files + else + mark_check_passed + fi + + return $has_error +} + +check_empty_files_and_dirs() { + local package=$1 + local has_error=0 + + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + # Find empty directories + local empty_dirs=() + while IFS= read -r empty_dir; do + empty_dirs+=("$empty_dir") + done < <(find . -type d -empty 2>/dev/null) + + # Find empty files + local empty_files=() + while IFS= read -r empty_file; do + empty_files+=("$empty_file") + done < <(find . 
-type f -empty 2>/dev/null) + + if [[ ${#empty_dirs[@]} -gt 0 ]]; then + collect_error "Package '$package' contains ${#empty_dirs[@]} empty director(y/ies):" + printf ' %s\n' "${empty_dirs[@]}" + has_error=1 + fi + + if [[ ${#empty_files[@]} -gt 0 ]]; then + collect_error "Package '$package' contains ${#empty_files[@]} empty file(s):" + printf ' %s\n' "${empty_files[@]}" + has_error=1 + fi + + if [[ $has_error -eq 0 ]]; then + mark_check_passed + fi + + return $has_error +} + +check_file_sizes() { + local package=$1 + local max_size=$2 + local has_error=0 + + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + local large_files=() + while IFS= read -r large_file; do + large_files+=("$large_file") + done < <(find . -type f -size "+${max_size}" 2>/dev/null) + + if [[ ${#large_files[@]} -gt 0 ]]; then + collect_error "Package '$package' contains ${#large_files[@]} file(s) larger than ${max_size}:" + for file in "${large_files[@]}"; do + local size=$(du -h "$file" | awk '{print $1}') + echo " $file ($size)" + done + has_error=1 else - echo -e "${RED}Error: Binary file $BINARY_FILE is not documented in LICENSE${NC}" && exit 1 - fi - done - - # 4.8: test compile the packages - if [[ "$i" =~ 'hugegraph-ai' ]]; then - echo "Skip compile $i module in all versions" - elif [[ "$i" =~ "hugegraph-computer" ]]; then - cd computer - mvn package -DskipTests -Papache-release -ntp -e - else - # TODO: consider using commands that are entirely consistent with building binary packages - mvn package -DskipTests -Papache-release -ntp -e - ls -lh - fi - popd -done - -########################################### -# Step 5: Run Compiled Packages of Server # -########################################### -cd "${DIST_DIR}" - -ls -lh -pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*"${RELEASE_VERSION}" -bin/init-store.sh -sleep 3 -bin/start-hugegraph.sh -popd - -####################################################################### -# Step 6: Run Compiled Packages of ToolChain (Loader & Tool & 
Hubble) # -####################################################################### -cd "${DIST_DIR}" - -pushd ./*toolchain*src -ls -lh -pushd ./*toolchain*"${RELEASE_VERSION}" -ls -lh - -# 6.1: load some data first -echo "test loader" -pushd ./*loader*"${RELEASE_VERSION}" -bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy \ - -g hugegraph -popd - -# 6.2: try some gremlin query & api in tool -echo "test tool" -pushd ./*tool*"${RELEASE_VERSION}" -bin/hugegraph gremlin-execute --script 'g.V().count()' -bin/hugegraph task-list -bin/hugegraph backup -t all --directory ./backup-test -popd - -# 6.3: start hubble and connect to server -echo "test hubble" -pushd ./*hubble*"${RELEASE_VERSION}" -# TODO: add hubble doc & test it -cat conf/hugegraph-hubble.properties -bin/start-hubble.sh -bin/stop-hubble.sh -popd - -popd -popd -# stop server -pushd ./*hugegraph-incubating*src/hugegraph-server/*hugegraph*"${RELEASE_VERSION}" -bin/stop-hugegraph.sh -popd - -# clear source packages -#rm -rf ./*src* -#ls -lh - -#################################### -# Step 7: Validate Binary Packages # -#################################### -cd "${DIST_DIR}" - -for i in *.tar.gz; do - if [[ "$i" == *-src.tar.gz ]]; then - # skip source packages - continue - fi - - echo "$i" - - # 7.1: check the directory name include "incubating" - if [[ ! "$i" =~ "incubating" ]]; then - echo -e "${RED}The package name $i should include incubating${NC}" && exit 1 - fi - - MODULE_DIR=$(basename "$i" .tar.gz) - rm -rf ${MODULE_DIR} - tar -xzvf "$i" - pushd ${MODULE_DIR} - ls -lh - echo "Start to check the package content: ${MODULE_DIR}" - - # 7.2: check root dir include "NOTICE"/"LICENSE"/"DISCLAIMER" files & "licenses" dir - if [[ ! -f "LICENSE" ]]; then - echo -e "${RED}The package $i should include LICENSE file${NC}" && exit 1 - fi - if [[ ! -f "NOTICE" ]]; then - echo -e "${RED}The package $i should include NOTICE file${NC}" && exit 1 - fi - if [[ ! 
-f "DISCLAIMER" ]]; then - echo -e "${RED}The package $i should include DISCLAIMER file${NC}" && exit 1 - fi - if [[ ! -d "licenses" ]]; then - echo -e "${RED}The package $i should include licenses dir${NC}" && exit 1 - fi - - # 7.3: ensure doesn't contains ASF CATEGORY X License dependencies in LICENSE/NOTICE and licenses/* files - COUNT=$(grep -r -E "$CATEGORY_X" LICENSE NOTICE licenses | wc -l) - if [[ $COUNT -ne 0 ]]; then - grep -r -E "$CATEGORY_X" LICENSE NOTICE licenses - echo -e "${RED}The package $i shouldn't include invalid ASF category X dependencies, but get $COUNT${NC}" && exit 1 - fi - - # 7.4: ensure doesn't contains empty directory or file - find . -type d -empty | while read -r EMPTY_DIR; do - find . -type d -empty - echo -e "${RED}The package $i shouldn't include empty directory: $EMPTY_DIR is empty${NC}" && exit 1 - done - find . -type f -empty | while read -r EMPTY_FILE; do - find . -type f -empty - echo -e "${RED}The package $i shouldn't include empty file: $EMPTY_FILE is empty${NC}" && exit 1 - done - - popd -done - -# TODO: skip the following steps by comparing the artifacts built from source packages with binary packages -######################################### -# Step 8: Run Binary Packages of Server # -######################################### -cd "${DIST_DIR}" - -# TODO: run pd & store -pushd ./*hugegraph-incubating*"${RELEASE_VERSION}"/*hugegraph-server-incubating*"${RELEASE_VERSION}" -bin/init-store.sh -sleep 3 -bin/start-hugegraph.sh -popd - -##################################################################### -# Step 9: Run Binary Packages of ToolChain (Loader & Tool & Hubble) # -##################################################################### -cd "${DIST_DIR}" - -pushd ./*toolchain*"${RELEASE_VERSION}" -ls -lh - -# 9.1: load some data first -echo "test loader" -pushd ./*loader*"${RELEASE_VERSION}" -bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph -popd - -# 9.2: try some 
gremlin query & api in tool -echo "test tool" -pushd ./*tool*"${RELEASE_VERSION}" -bin/hugegraph gremlin-execute --script 'g.V().count()' -bin/hugegraph task-list -bin/hugegraph backup -t all --directory ./backup-test -popd - -# 9.3: start hubble and connect to server -echo "test hubble" -pushd ./*hubble*"${RELEASE_VERSION}" -# TODO: add hubble doc & test it -cat conf/hugegraph-hubble.properties -bin/start-hubble.sh -bin/stop-hubble.sh -popd - -popd -# stop server -pushd ./*hugegraph-incubating*"${RELEASE_VERSION}"/*hugegraph-server-incubating*"${RELEASE_VERSION}" -bin/stop-hugegraph.sh -popd - -echo -e "${GREEN}Finish validate, please check all steps manually again!${NC}" + mark_check_passed + fi + + return $has_error +} + +check_binary_files() { + local package=$1 + local has_error=0 + + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + info "Checking for undocumented binary files..." + + local binary_count=0 + local undocumented_count=0 + + # Find binary files using perl + while IFS= read -r binary_file; do + binary_count=$((binary_count + 1)) + local file_name=$(basename "$binary_file") + + # Check if documented in LICENSE + if grep -q "$file_name" LICENSE 2>/dev/null; then + success "Binary file '$binary_file' is documented in LICENSE" + else + collect_error "Undocumented binary file: $binary_file" + undocumented_count=$((undocumented_count + 1)) + has_error=1 + fi + done < <(find . -type f -exec perl -lne 'print if -B' {} \; 2>/dev/null) + + if [[ $binary_count -eq 0 ]]; then + success "No binary files found" + mark_check_passed + elif [[ $undocumented_count -eq 0 ]]; then + success "All $binary_count binary file(s) are documented" + mark_check_passed + fi + + return $has_error +} + +check_license_headers() { + local package=$1 + + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + info "Checking for ASF license headers in source files..." + + # Check Java files for Apache license headers + local files_without_license=() + while IFS= read -r java_file; do + if ! 
head -n 20 "$java_file" | grep -q "Licensed to the Apache Software Foundation"; then + files_without_license+=("$java_file") + fi + done < <(find . -name "*.java" -type f 2>/dev/null) + + if [[ ${#files_without_license[@]} -gt 0 ]]; then + collect_warning "Found ${#files_without_license[@]} Java file(s) without ASF license headers" + collect_warning "Run 'mvn apache-rat:check' for detailed license header analysis" + # Note: This is a warning, not an error + else + success "All Java source files have ASF license headers" + mark_check_passed + fi +} + +check_version_consistency() { + local package=$1 + local expected_version=$2 + + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + info "Checking version consistency in pom.xml files..." + + # Find inconsistent versions in pom.xml files + local inconsistent=() + while IFS= read -r pom_file; do + # Extract version tags (exclude parent versions and SNAPSHOT) + while IFS= read -r version_line; do + if [[ ! "$version_line" =~ "<parent>" ]] && \ + [[ ! "$version_line" =~ "SNAPSHOT" ]] && \ + [[ ! "$version_line" =~ "$expected_version" ]]; then + inconsistent+=("$pom_file: $version_line") + fi + done < <(grep "<version>" "$pom_file" 2>/dev/null) + done < <(find . -name "pom.xml" -type f 2>/dev/null) + + if [[ ${#inconsistent[@]} -gt 0 ]]; then + collect_error "Found version inconsistencies in pom.xml files:" + printf ' %s\n' "${inconsistent[@]}" + return 1 + else + success "Version consistency check passed" + mark_check_passed + fi + + return 0 +} + +check_notice_year() { + local package=$1 + + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + if [[ ! -f "NOTICE" ]]; then + return 0 # Already checked in check_required_files + fi + + local current_year=$(date +%Y) + if ! grep -q "$current_year" NOTICE; then + collect_warning "NOTICE file may not contain current year ($current_year). Please verify copyright dates."
+ else + mark_check_passed + fi +} + +################################################################################ +# Main Validation Functions +################################################################################ + +validate_source_package() { + local package_file=$1 + local package_dir=$(basename "$package_file" .tar.gz) + + info "Validating source package: $package_file" + + # Extract package + rm -rf "$package_dir" + tar -xzf "$package_file" + + if [[ ! -d "$package_dir" ]]; then + collect_error "Failed to extract package: $package_file" + return 1 + fi + + pushd "$package_dir" > /dev/null + + # Run all checks + check_incubating_name "$package_file" + check_required_files "$package_file" true + check_license_categories "$package_file" "LICENSE NOTICE" + check_empty_files_and_dirs "$package_file" + check_file_sizes "$package_file" "$MAX_FILE_SIZE" + check_binary_files "$package_file" + check_license_headers "$package_file" + check_version_consistency "$package_file" "$RELEASE_VERSION" + check_notice_year "$package_file" + + # Compile check + info "Compiling source package: $package_file" + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + + if [[ "$package_file" =~ 'hugegraph-ai' ]]; then + warn "Skipping compilation for AI module (not required)" + mark_check_passed + elif [[ "$package_file" =~ "hugegraph-computer" ]]; then + if cd computer 2>/dev/null && mvn package -DskipTests -Papache-release -ntp -e; then + success "Compilation successful: $package_file" + mark_check_passed + else + collect_error "Compilation failed: $package_file" + fi + cd .. 
+ else + if mvn package -DskipTests -Papache-release -ntp -e; then + success "Compilation successful: $package_file" + mark_check_passed + else + collect_error "Compilation failed: $package_file" + fi + fi + + popd > /dev/null + + info "Finished validating source package: $package_file" +} + +validate_binary_package() { + local package_file=$1 + local package_dir=$(basename "$package_file" .tar.gz) + + info "Validating binary package: $package_file" + + # Extract package + rm -rf "$package_dir" + tar -xzf "$package_file" + + if [[ ! -d "$package_dir" ]]; then + collect_error "Failed to extract package: $package_file" + return 1 + fi + + pushd "$package_dir" > /dev/null + + # Run checks + check_incubating_name "$package_file" + check_required_files "$package_file" true + + # Binary packages should have licenses directory + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + if [[ ! -d "licenses" ]]; then + collect_error "Package '$package_file' missing licenses directory" + else + mark_check_passed + fi + + check_license_categories "$package_file" "LICENSE NOTICE licenses" + check_empty_files_and_dirs "$package_file" + + popd > /dev/null + + info "Finished validating binary package: $package_file" +} + +################################################################################ +# Cleanup Function +################################################################################ + +cleanup() { + local exit_code=$? + + log "CLEANUP" "Starting cleanup (exit code: $exit_code)" + + # Stop running services + if [[ $SERVER_STARTED -eq 1 ]]; then + info "Stopping HugeGraph server..." + local server_dir=$(find_package_dir "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${RELEASE_VERSION}" 2>/dev/null || echo "") + if [[ -n "$server_dir" ]] && [[ -d "$server_dir" ]]; then + pushd "$server_dir" > /dev/null 2>&1 + bin/stop-hugegraph.sh || true + popd > /dev/null 2>&1 + fi + fi + + if [[ $HUBBLE_STARTED -eq 1 ]]; then + info "Stopping Hubble..." 
+ # Hubble stop is handled in the test flow + fi + + # Show final report + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo " VALIDATION SUMMARY " + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + echo "Total Checks: $TOTAL_CHECKS" + echo -e "${GREEN}Passed: $PASSED_CHECKS${NC}" + echo -e "${RED}Failed: $FAILED_CHECKS${NC}" + echo -e "${YELLOW}Warnings: ${#VALIDATION_WARNINGS[@]}${NC}" + echo "" + + if [[ ${#VALIDATION_ERRORS[@]} -gt 0 ]]; then + echo -e "${RED}━━━ ERRORS ━━━${NC}" + for err in "${VALIDATION_ERRORS[@]}"; do + echo -e "${RED} ✗ $err${NC}" + done + echo "" + fi + + if [[ ${#VALIDATION_WARNINGS[@]} -gt 0 ]]; then + echo -e "${YELLOW}━━━ WARNINGS ━━━${NC}" + for warn in "${VALIDATION_WARNINGS[@]}"; do + echo -e "${YELLOW} ⚠ $warn${NC}" + done + echo "" + fi + + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + + if [[ ${#VALIDATION_ERRORS[@]} -gt 0 ]]; then + echo -e "${RED}VALIDATION FAILED${NC}" + echo -e "Log file: ${LOG_FILE}" + echo "" + exit 1 + else + echo -e "${GREEN}✓ VALIDATION PASSED${NC}" + echo -e "Log file: ${LOG_FILE}" + echo "" + echo "Please review the validation results and provide feedback in the" + echo "release voting thread on the mailing list." 
+ echo "" + exit 0 + fi +} + +# Set trap for cleanup +trap cleanup EXIT +trap 'echo -e "${RED}Script interrupted${NC}"; exit 130' INT TERM + +################################################################################ +# Main Execution +################################################################################ + +main() { + # Parse command line arguments + while [[ $# -gt 0 ]]; do + case $1 in + --help|-h) + show_usage + exit 0 + ;; + --version|-v) + echo "Apache HugeGraph Release Validation Script v${SCRIPT_VERSION}" + exit 0 + ;; + --non-interactive) + NON_INTERACTIVE=1 + shift + ;; + *) + break + ;; + esac + done + + # Parse positional arguments + RELEASE_VERSION=${1:-} + USER=${2:-} + LOCAL_DIST_PATH=${3:-} + JAVA_VERSION=${4:-11} + + # Validate required arguments + if [[ -z "$RELEASE_VERSION" ]]; then + error "Missing required argument: version" + echo "" + show_usage + exit 1 + fi + + if [[ -z "$USER" ]]; then + error "Missing required argument: user" + echo "" + show_usage + exit 1 + fi + + # Initialize + WORK_DIR=$(cd "$(dirname "$0")" && pwd) + cd "${WORK_DIR}" + + setup_logging + + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo " Apache HugeGraph Release Validation v${SCRIPT_VERSION}" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + echo " Version: ${RELEASE_VERSION}" + echo " User: ${USER}" + echo " Java: ${JAVA_VERSION}" + echo " Mode: $([ -n "${LOCAL_DIST_PATH}" ] && echo "Local (${LOCAL_DIST_PATH})" || echo "SVN Download")" + echo " Log: ${LOG_FILE}" + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + + #################################################### + # Step 1: Check Dependencies + #################################################### + print_step 1 9 "Check Dependencies" + check_dependencies + check_java_version "$JAVA_VERSION" + + #################################################### + # Step 2: Prepare Release Files + 
#################################################### + print_step 2 9 "Prepare Release Files" + + if [[ -n "${LOCAL_DIST_PATH}" ]]; then + # Use local directory + DIST_DIR="${LOCAL_DIST_PATH}" + info "Using local directory: ${DIST_DIR}" + + if [[ ! -d "${DIST_DIR}" ]]; then + collect_error "Directory ${DIST_DIR} does not exist" + exit 1 + fi + + info "Contents of ${DIST_DIR}:" + ls -lh "${DIST_DIR}" + else + # Download from SVN + DIST_DIR="${WORK_DIR}/dist/${RELEASE_VERSION}" + info "Downloading from SVN to: ${DIST_DIR}" + + rm -rf "${DIST_DIR}" + mkdir -p "${DIST_DIR}" + + if ! svn co "${SVN_URL_PREFIX}/${RELEASE_VERSION}" "${DIST_DIR}"; then + collect_error "Failed to download from SVN: ${SVN_URL_PREFIX}/${RELEASE_VERSION}" + exit 1 + fi + + success "Downloaded release files from SVN" + fi + + cd "${DIST_DIR}" + + #################################################### + # Step 3: Import GPG Keys + #################################################### + print_step 3 9 "Import & Trust GPG Keys" + import_and_trust_gpg_keys "$USER" + + #################################################### + # Step 4: Check SHA512 & GPG Signatures + #################################################### + print_step 4 9 "Verify SHA512 & GPG Signatures" + + local package_count=0 + local packages=() + for pkg in *.tar.gz; do + if [[ -f "$pkg" ]]; then + packages+=("$pkg") + package_count=$((package_count + 1)) + fi + done + + local current=0 + for pkg in "${packages[@]}"; do + current=$((current + 1)) + print_progress $current $package_count "$pkg" + + # Check SHA512 + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + if shasum -a 512 --check "${pkg}.sha512"; then + success "SHA512 verified: $pkg" + mark_check_passed + else + collect_error "SHA512 verification failed: $pkg" + fi + + # Check GPG signature + TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) + if gpg --verify "${pkg}.asc" "$pkg" 2>&1 | grep -q "Good signature"; then + success "GPG signature verified: $pkg" + mark_check_passed + else + collect_error "GPG 
signature verification failed: $pkg" + fi + done + + #################################################### + # Step 5: Validate Source Packages + #################################################### + print_step 5 9 "Validate Source Packages" + + local src_packages=() + for pkg in *-src.tar.gz; do + if [[ -f "$pkg" ]]; then + src_packages+=("$pkg") + fi + done + + info "Found ${#src_packages[@]} source package(s)" + + for src_pkg in "${src_packages[@]}"; do + validate_source_package "$src_pkg" + done + + #################################################### + # Step 6: Run Compiled Packages (Server) + #################################################### + print_step 6 9 "Test Compiled Server Package" + + local server_dir=$(find_package_dir "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${RELEASE_VERSION}") + if [[ -n "$server_dir" ]]; then + info "Starting HugeGraph server from: $server_dir" + pushd "$server_dir" > /dev/null + + if bin/init-store.sh; then + success "Store initialized" + else + collect_error "Failed to initialize store" + fi + + sleep $SERVER_START_DELAY + + if bin/start-hugegraph.sh; then + success "Server started" + SERVER_STARTED=1 + else + collect_error "Failed to start server" + fi + + popd > /dev/null + else + collect_error "Could not find compiled server directory" + fi + + #################################################### + # Step 7: Test Toolchain (Loader, Tool, Hubble) + #################################################### + print_step 7 9 "Test Compiled Toolchain Packages" + + local toolchain_src=$(find_package_dir "*toolchain*src") + if [[ -n "$toolchain_src" ]]; then + pushd "$toolchain_src" > /dev/null + + local toolchain_dir=$(find . -maxdepth 1 -type d -name "*toolchain*${RELEASE_VERSION}" | head -n1) + if [[ -n "$toolchain_dir" ]]; then + pushd "$toolchain_dir" > /dev/null + + # Test Loader + info "Testing HugeGraph Loader..." + local loader_dir=$(find . 
-maxdepth 1 -type d -name "*loader*${RELEASE_VERSION}" | head -n1) + if [[ -n "$loader_dir" ]]; then + pushd "$loader_dir" > /dev/null + if bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph; then + success "Loader test passed" + else + collect_error "Loader test failed" + fi + popd > /dev/null + fi + + # Test Tool + info "Testing HugeGraph Tool..." + local tool_dir=$(find . -maxdepth 1 -type d -name "*tool*${RELEASE_VERSION}" | head -n1) + if [[ -n "$tool_dir" ]]; then + pushd "$tool_dir" > /dev/null + if bin/hugegraph gremlin-execute --script 'g.V().count()' && \ + bin/hugegraph task-list && \ + bin/hugegraph backup -t all --directory ./backup-test; then + success "Tool test passed" + else + collect_error "Tool test failed" + fi + popd > /dev/null + fi + + # Test Hubble + info "Testing HugeGraph Hubble..." + local hubble_dir=$(find . -maxdepth 1 -type d -name "*hubble*${RELEASE_VERSION}" | head -n1) + if [[ -n "$hubble_dir" ]]; then + pushd "$hubble_dir" > /dev/null + if bin/start-hubble.sh; then + HUBBLE_STARTED=1 + success "Hubble started" + sleep 2 + bin/stop-hubble.sh + HUBBLE_STARTED=0 + success "Hubble stopped" + else + collect_error "Hubble test failed" + fi + popd > /dev/null + fi + + popd > /dev/null + fi + + popd > /dev/null + fi + + # Stop server after toolchain tests + if [[ $SERVER_STARTED -eq 1 ]] && [[ -n "$server_dir" ]]; then + info "Stopping server..." 
+ pushd "$server_dir" > /dev/null + bin/stop-hugegraph.sh + SERVER_STARTED=0 + success "Server stopped" + popd > /dev/null + fi + + #################################################### + # Step 8: Validate Binary Packages + #################################################### + print_step 8 9 "Validate Binary Packages" + + cd "${DIST_DIR}" + + local bin_packages=() + for pkg in *.tar.gz; do + if [[ "$pkg" != *-src.tar.gz ]]; then + bin_packages+=("$pkg") + fi + done + + info "Found ${#bin_packages[@]} binary package(s)" + + for bin_pkg in "${bin_packages[@]}"; do + validate_binary_package "$bin_pkg" + done + + #################################################### + # Step 9: Test Binary Packages + #################################################### + print_step 9 9 "Test Binary Server & Toolchain" + + # Test binary server + local bin_server_dir=$(find_package_dir "*hugegraph-incubating*${RELEASE_VERSION}/*hugegraph-server-incubating*${RELEASE_VERSION}") + if [[ -n "$bin_server_dir" ]]; then + info "Testing binary server package..." + pushd "$bin_server_dir" > /dev/null + + if bin/init-store.sh && sleep $SERVER_START_DELAY && bin/start-hugegraph.sh; then + success "Binary server started" + SERVER_STARTED=1 + else + collect_error "Failed to start binary server" + fi + + popd > /dev/null + fi + + # Test binary toolchain + local bin_toolchain=$(find_package_dir "*toolchain*${RELEASE_VERSION}" "${DIST_DIR}") + if [[ -n "$bin_toolchain" ]]; then + pushd "$bin_toolchain" > /dev/null + + # Test binary loader + local bin_loader=$(find . -maxdepth 1 -type d -name "*loader*${RELEASE_VERSION}" | head -n1) + if [[ -n "$bin_loader" ]]; then + pushd "$bin_loader" > /dev/null + if bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph; then + success "Binary loader test passed" + else + collect_error "Binary loader test failed" + fi + popd > /dev/null + fi + + # Test binary tool + local bin_tool=$(find . 
-maxdepth 1 -type d -name "*tool*${RELEASE_VERSION}" | head -n1) + if [[ -n "$bin_tool" ]]; then + pushd "$bin_tool" > /dev/null + if bin/hugegraph gremlin-execute --script 'g.V().count()' && \ + bin/hugegraph task-list && \ + bin/hugegraph backup -t all --directory ./backup-test; then + success "Binary tool test passed" + else + collect_error "Binary tool test failed" + fi + popd > /dev/null + fi + + # Test binary hubble + local bin_hubble=$(find . -maxdepth 1 -type d -name "*hubble*${RELEASE_VERSION}" | head -n1) + if [[ -n "$bin_hubble" ]]; then + pushd "$bin_hubble" > /dev/null + if bin/start-hubble.sh; then + HUBBLE_STARTED=1 + success "Binary hubble started" + sleep 2 + bin/stop-hubble.sh + HUBBLE_STARTED=0 + success "Binary hubble stopped" + else + collect_error "Binary hubble test failed" + fi + popd > /dev/null + fi + + popd > /dev/null + fi + + # Stop binary server + if [[ $SERVER_STARTED -eq 1 ]] && [[ -n "$bin_server_dir" ]]; then + pushd "$bin_server_dir" > /dev/null + bin/stop-hugegraph.sh + SERVER_STARTED=0 + success "Binary server stopped" + popd > /dev/null + fi + + #################################################### + # Validation Complete + #################################################### + success "All validation steps completed!" + + # Cleanup function will show the final report +} + +# Run main function +main "$@" From 1ab58f165fc26811c4b47f44ef9a0469119a51e0 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 15 Nov 2025 02:57:57 +0800 Subject: [PATCH 04/19] refactor: enhance release validation for multi-arch and license checks V3 Updated CI workflow to support additional OS and architectures (arm64, macOS 14). Improved documentation and script usage instructions. The license header check now covers more file types and excludes generated/vendor files. Maven build commands in docs and scripts now use '-DskipTests' and '-Dcheckstyle.skip=true' for consistency. Added a detailed README for the release validation script. 
--- .github/workflows/validate-release.yml | 4 +- .../validate-release.md | 4 +- .../validate-release.md | 4 +- dist/README.md | 231 ++++++++++++++++++ dist/validate-release.sh | 134 +++++++--- 5 files changed, 342 insertions(+), 35 deletions(-) create mode 100644 dist/README.md diff --git a/.github/workflows/validate-release.yml b/.github/workflows/validate-release.yml index 747f0d78e..bbd1506f2 100644 --- a/.github/workflows/validate-release.yml +++ b/.github/workflows/validate-release.yml @@ -350,5 +350,5 @@ jobs: matrix: # disable java8 because of server java_version: ['11'] - # TODO: support windows-latest or other OS in future - os: [ubuntu-latest, macos-latest] + # Support multiple OS and architectures (x64 and arm64) + os: [ubuntu-latest, ubuntu-24.04-arm, macos-latest, macos-14] diff --git a/content/cn/docs/contribution-guidelines/validate-release.md b/content/cn/docs/contribution-guidelines/validate-release.md index 86f6a22e5..7794a4ba2 100644 --- a/content/cn/docs/contribution-guidelines/validate-release.md +++ b/content/cn/docs/contribution-guidelines/validate-release.md @@ -125,8 +125,8 @@ PMC 同学请特别注意认真检查 `LICENSE` + `NOTICE` 文件,确保文件 # 请优先使用/切换到 `java 11` 版本进行后序的编译和运行操作 (注:`Computer` 仅支持 `java >= 11`) # java --version -# 尝试在 Unix 环境下编译测试是否正常 (stage 表示从 stage 仓库拉取依赖) -mvn clean package -P stage -Dmaven.test.skip=true -Dcheckstyle.skip=true +# 尝试在 Unix 环境下编译测试是否正常 +mvn clean package -DskipTests -Dcheckstyle.skip=true -P stage ``` ##### B. 
二进制包 diff --git a/content/en/docs/contribution-guidelines/validate-release.md b/content/en/docs/contribution-guidelines/validate-release.md index 398d79c14..83613d13c 100644 --- a/content/en/docs/contribution-guidelines/validate-release.md +++ b/content/en/docs/contribution-guidelines/validate-release.md @@ -136,8 +136,8 @@ After decompressing `*hugegraph*src.tar.gz`, Do the following checks: # prefer to use/switch to `java 11` for the following operations (compiling/running) (Note: `Computer` only supports `java >= 11`) # java --version -# try to compile in the Unix env to check if it works well -mvn clean package -P stage -Dmaven.test.skip=true -Dcheckstyle.skip=true +# try to compile in the Unix env to check if it works well (-P is optional) +mvn clean package -P stage -DskipTests -Dcheckstyle.skip=true ``` ##### B. binary package diff --git a/dist/README.md b/dist/README.md new file mode 100644 index 000000000..b4d43d8ef --- /dev/null +++ b/dist/README.md @@ -0,0 +1,231 @@ +# Apache HugeGraph 发版验证脚本 + +Apache HugeGraph (Incubating) 发布包的自动化验证脚本。 + +## 概述 + +`validate-release.sh` 脚本对 Apache HugeGraph 发布包进行全面验证,自动执行 [Apache 发布政策](https://www.apache.org/legal/release-policy.html) 和 [孵化器发布检查清单](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) 要求的大部分检查。 + +## 功能特性 + +- ✅ **自动依赖检查** - 验证所有必需工具(svn、gpg、java、maven 等) +- ✅ **SHA512 和 GPG 签名验证** - 确保包的完整性和真实性 +- ✅ **许可证合规性验证** - 检查禁止的 ASF Category X 和 B 类许可证 +- ✅ **包内容验证** - 验证必需文件(LICENSE、NOTICE、DISCLAIMER) +- ✅ **ASF 许可证头检查** - 验证所有源文件中的许可证头(Java、Python、Go、Shell 等) +- ✅ **版本一致性验证** - 确保 pom.xml 版本与预期发布版本匹配 +- ✅ **源码包编译** - 编译源码包以验证构建正确性 +- ✅ **运行时测试** - 测试服务器和工具链(loader、tool、hubble)功能 +- ✅ **进度跟踪** - 显示实时进度和分步指示器 +- ✅ **详细日志记录** - 将所有输出保存到带时间戳的日志文件 +- ✅ **全面的错误报告** - 收集所有错误并在最后显示摘要 + +## 环境要求 + +- Java 11(HugeGraph 1.5.0+ 必需) +- Maven 3.x +- svn(Subversion 客户端) +- gpg(用于签名验证的 GnuPG) +- wget 或 curl +- 标准 Unix 工具(bash、find、grep、awk 等) + +脚本会自动检查所有依赖项,如果缺少任何内容会提供安装说明。 + +## 使用方法 + +### 
基本用法 + +```bash +# 查看帮助信息 +./validate-release.sh --help + +# 从 Apache SVN 验证(自动下载发布文件) +./validate-release.sh <版本号> + +# 示例 +./validate-release.sh 1.7.0 pengjunzhi +``` + +### 高级用法 + +```bash +# 从本地目录验证(如果已经下载了文件) +./validate-release.sh <版本号> <本地路径> + +# 示例 +./validate-release.sh 1.7.0 pengjunzhi /path/to/downloaded/dist + +# 指定 Java 版本(默认:11) +./validate-release.sh <版本号> <本地路径> + +# 示例 - 使用 Java 11 +./validate-release.sh 1.7.0 pengjunzhi /path/to/dist 11 + +# 示例 - SVN 模式使用 Java 11 +./validate-release.sh 1.7.0 pengjunzhi "" 11 + +# 非交互模式(用于 CI/CD) +./validate-release.sh --non-interactive 1.7.0 pengjunzhi +``` + +### 命令行选项 + +- `--help`, `-h` - 显示帮助信息并退出 +- `--version`, `-v` - 显示脚本版本并退出 +- `--non-interactive` - 无提示运行(用于 CI/CD 管道) + +## 验证步骤 + +脚本执行以下 9 个验证步骤: + +1. **检查依赖项** - 验证所有必需工具已安装 +2. **准备发布文件** - 从 Apache SVN 下载或使用本地目录 +3. **导入并信任 GPG 密钥** - 导入 KEYS 文件并信任发布管理员的 GPG 密钥 +4. **验证 SHA512 和 GPG 签名** - 验证所有包的校验和和签名 +5. **验证源码包** - 对源码包进行全面检查: + - 包命名(包含 "incubating") + - 必需文件(LICENSE、NOTICE、DISCLAIMER) + - 许可证合规性(无 Category X,已记录 Category B) + - 无空文件或目录 + - 文件大小限制(无文件 > 800KB) + - 二进制文件文档 + - 所有源文件中的许可证头 + - pom.xml 文件之间的版本一致性 + - NOTICE 文件版权年份 + - 源码编译 +6. **测试编译的服务器** - 启动并测试编译的 HugeGraph 服务器 +7. **测试编译的工具链** - 从编译包测试 loader、tool 和 hubble +8. **验证二进制包** - 检查二进制包的必需文件和结构 +9. **测试二进制包** - 从二进制包测试服务器和工具链 + +## 输出结果 + +### 进度指示器 + +脚本提供实时进度信息: + +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + Apache HugeGraph Release Validation v2.0.0 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + + 版本: 1.7.0 + 用户: pengjunzhi + Java: 11 + 模式: SVN 下载 + 日志: logs/validate-1.7.0-20251115-021742.log + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +步骤 [1/9]: 检查依赖项 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +✓ svn: version 1.14.1 +✓ gpg: gpg (GnuPG) 2.2.41 +✓ java: 11.0.21 +... 
+``` + +### 彩色结果 + +- ✓ **绿色** - 成功的检查 +- ✗ **红色** - 需要修复的错误 +- ⚠ **黄色** - 需要审查的警告 +- **蓝色** - 步骤标题和进度信息 + +### 日志文件 + +所有输出都保存到 `logs/validate--.log` 以供后续查看。 + +### 最终摘要 + +验证结束时,会显示一个全面的摘要: + +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + 验证摘要 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +总检查数: 127 +通过: 125 +失败: 2 +警告: 3 + +━━━ 错误 ━━━ + ✗ 包 'xyz' 缺少 LICENSE 文件 + ✗ 二进制文件 'logo.png' 未在 LICENSE 中记录 + +━━━ 警告 ━━━ + ⚠ NOTICE 文件可能不包含当前年份 (2025) + +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +验证失败 +日志文件: logs/validate-1.7.0-20251115-021742.log +``` + +## 错误处理 + +脚本使用**"继续并报告"**方式: + +- 不会在第一个错误时退出 +- 收集所有验证错误 +- 在最后显示全面摘要 +- 退出码 0 = 所有检查通过 +- 退出码 1 = 一个或多个检查失败 + +这允许你一次看到所有问题,而不是逐个修复。 + +## 故障排除 + +### Java 版本不匹配 + +如果看到 Java 版本错误: + +```bash +# 检查你的 Java 版本 +java -version + +# 使用 JAVA_HOME 指定 Java 11 +export JAVA_HOME=/path/to/java11 +export PATH=$JAVA_HOME/bin:$PATH + +# 或使用 jenv 切换 Java 版本 +jenv global 11 +``` + +### GPG 密钥问题 + +如果 GPG 密钥导入失败: + +```bash +# 手动下载并导入 KEYS +curl https://downloads.apache.org/incubator/hugegraph/KEYS > KEYS +gpg --import KEYS + +# 信任特定密钥 +gpg --edit-key +# 在 GPG 提示符中,输入: trust, 然后 5, 然后 y, 然后 quit +``` + +### 权限被拒绝 + +确保脚本可执行: + +```bash +chmod +x validate-release.sh +``` + +## 参考文档 + +- [Apache 发布政策](https://www.apache.org/legal/release-policy.html) +- [孵化器发布检查清单](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) +- [HugeGraph 验证发布指南](../content/cn/docs/contribution-guidelines/validate-release.md) + +## 贡献 + +如果发现问题或有改进建议,请: + +1. 查看现有问题:https://github.com/apache/incubator-hugegraph-doc/issues +2. 提交新问题或 pull request + diff --git a/dist/validate-release.sh b/dist/validate-release.sh index 06dcdbbd9..013694651 100755 --- a/dist/validate-release.sh +++ b/dist/validate-release.sh @@ -1,20 +1,4 @@ #!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ################################################################################ # Apache HugeGraph Release Validation Script ################################################################################ @@ -46,7 +30,6 @@ # ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist # # # Specify Java version -# ./validate-release.sh 1.7.0 pengjunzhi "" 11 # ./validate-release.sh 1.7.0 pengjunzhi /path/to/dist 11 # ################################################################################ @@ -154,7 +137,7 @@ Examples: ${SCRIPT_NAME} --non-interactive 1.7.0 pengjunzhi For more information, visit: - https://hugegraph.apache.org/docs/contribution-guidelines/validate-release/ + https://github.com/apache/incubator-hugegraph-doc/tree/master/dist EOF } @@ -548,21 +531,114 @@ check_license_headers() { info "Checking for ASF license headers in source files..." - # Check Java files for Apache license headers + # Define file patterns to check for license headers + # Including: Java, Shell scripts, Python, Go, JavaScript, TypeScript, C/C++, Scala, Groovy, etc. 
+ local -a file_patterns=( + "*.java" # Java files + "*.sh" # Shell scripts + "*.py" # Python files + "*.go" # Go files + "*.js" # JavaScript files + "*.ts" # TypeScript files + "*.jsx" # React JSX files + "*.tsx" # React TypeScript files + "*.c" # C files + "*.h" # C header files + "*.cpp" # C++ files + "*.cc" # C++ files + "*.cxx" # C++ files + "*.hpp" # C++ header files + "*.scala" # Scala files + "*.groovy" # Groovy files + "*.gradle" # Gradle build files + "*.rs" # Rust files + "*.kt" # Kotlin files + "*.proto" # Protocol buffer files + ) + + # Files to exclude from license header check + local -a exclude_patterns=( + "*.min.js" # Minified JavaScript + "*.min.css" # Minified CSS + "*node_modules*" # Node.js dependencies + "*target*" # Maven build output + "*build*" # Build directories + "*.pb.go" # Generated protobuf files + "*generated*" # Generated code + "*third_party*" # Third party code + "*vendor*" # Vendor dependencies + ) + local files_without_license=() - while IFS= read -r java_file; do - if ! head -n 20 "$java_file" | grep -q "Licensed to the Apache Software Foundation"; then - files_without_license+=("$java_file") + local total_checked=0 + local excluded_count=0 + + # Build find command with all patterns + local find_cmd="find . -type f \\(" + local first=1 + for pattern in "${file_patterns[@]}"; do + if [[ $first -eq 1 ]]; then + find_cmd="$find_cmd -name \"$pattern\"" + first=0 + else + find_cmd="$find_cmd -o -name \"$pattern\"" fi - done < <(find . 
-name "*.java" -type f 2>/dev/null) + done + find_cmd="$find_cmd \\) 2>/dev/null" + + # Check each source file for ASF license header + while IFS= read -r source_file; do + # Skip if file matches exclude patterns + local should_exclude=0 + for exclude_pattern in "${exclude_patterns[@]}"; do + if [[ "$source_file" == $exclude_pattern ]]; then + should_exclude=1 + excluded_count=$((excluded_count + 1)) + break + fi + done + + if [[ $should_exclude -eq 1 ]]; then + continue + fi + + total_checked=$((total_checked + 1)) + + # Check first 30 lines for Apache license header + # Looking for the standard ASF license header text + if ! head -n 30 "$source_file" | grep -q "Licensed to the Apache Software Foundation"; then + files_without_license+=("$source_file") + fi + done < <(eval "$find_cmd") + + # Report results + info "Checked $total_checked source file(s) for ASF license headers (excluded $excluded_count generated/vendored files)" if [[ ${#files_without_license[@]} -gt 0 ]]; then - collect_warning "Found ${#files_without_license[@]} Java file(s) without ASF license headers" - collect_warning "Run 'mvn apache-rat:check' for detailed license header analysis" - # Note: This is a warning, not an error + collect_error "Found ${#files_without_license[@]} source file(s) without ASF license headers:" + + # Show first 20 files without headers (to avoid overwhelming output) + local show_count=${#files_without_license[@]} + if [[ $show_count -gt 20 ]]; then + show_count=20 + fi + + for ((i=0; i/dev/null && mvn package -DskipTests -Papache-release -ntp -e; then + if cd computer 2>/dev/null && mvn clean package -DskipTests -Dcheckstyle.skip=true -ntp -e; then success "Compilation successful: $package_file" mark_check_passed else @@ -664,7 +740,7 @@ validate_source_package() { fi cd .. 
else - if mvn package -DskipTests -Papache-release -ntp -e; then + if mvn clean package -DskipTests -Dcheckstyle.skip=true -ntp -e; then success "Compilation successful: $package_file" mark_check_passed else From ba747666386a4c4b678c3ac375e05767ec57d489 Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 15 Nov 2025 04:07:28 +0800 Subject: [PATCH 05/19] refactor: improve validation script error context and reporting V4 Adds contextual error and warning reporting with step and package information, enhances license category and header checks, improves version consistency logic, and refines summary output with execution time and clearer formatting. These changes make validation results more actionable and easier to interpret, especially for multi-package and multi-step validations. --- dist/validate-release.sh | 209 +++++++++++++++++++++++++++++++-------- 1 file changed, 169 insertions(+), 40 deletions(-) diff --git a/dist/validate-release.sh b/dist/validate-release.sh index 013694651..cf39fdc6f 100755 --- a/dist/validate-release.sh +++ b/dist/validate-release.sh @@ -87,11 +87,16 @@ declare -a VALIDATION_WARNINGS=() TOTAL_CHECKS=0 PASSED_CHECKS=0 FAILED_CHECKS=0 +CURRENT_STEP="" +CURRENT_PACKAGE="" # Service tracking for cleanup SERVER_STARTED=0 HUBBLE_STARTED=0 +# Script execution time tracking +SCRIPT_START_TIME=0 + ################################################################################ # Helper Functions - Output & Logging ################################################################################ @@ -174,6 +179,7 @@ print_step() { local step=$1 local total=$2 local description=$3 + CURRENT_STEP="Step $step: $description" echo "" echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" echo -e "${BLUE}Step [$step/$total]: $description${NC}" @@ -190,25 +196,65 @@ print_progress() { collect_error() { local error_msg="$1" - VALIDATION_ERRORS+=("$error_msg") + local context="" + + # Build context string + if [[ -n "$CURRENT_STEP" 
]]; then + context="[$CURRENT_STEP]" + fi + + if [[ -n "$CURRENT_PACKAGE" ]]; then + if [[ -n "$context" ]]; then + context="$context [$CURRENT_PACKAGE]" + else + context="[$CURRENT_PACKAGE]" + fi + fi + + # Store error with context + if [[ -n "$context" ]]; then + VALIDATION_ERRORS+=("$context $error_msg") + else + VALIDATION_ERRORS+=("$error_msg") + fi + FAILED_CHECKS=$((FAILED_CHECKS + 1)) error "$error_msg" } collect_warning() { local warning_msg="$1" - VALIDATION_WARNINGS+=("$warning_msg") + local context="" + + # Build context string + if [[ -n "$CURRENT_STEP" ]]; then + context="[$CURRENT_STEP]" + fi + + if [[ -n "$CURRENT_PACKAGE" ]]; then + if [[ -n "$context" ]]; then + context="$context [$CURRENT_PACKAGE]" + else + context="[$CURRENT_PACKAGE]" + fi + fi + + # Store warning with context + if [[ -n "$context" ]]; then + VALIDATION_WARNINGS+=("$context $warning_msg") + else + VALIDATION_WARNINGS+=("$warning_msg") + fi + warn "$warning_msg" } mark_check_passed() { PASSED_CHECKS=$((PASSED_CHECKS + 1)) - TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) } mark_check_failed() { FAILED_CHECKS=$((FAILED_CHECKS + 1)) - TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) } ################################################################################ @@ -402,10 +448,28 @@ check_license_categories() { # Check Category X (Prohibited) TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) - local cat_x_count=$(grep -r -E "$CATEGORY_X" $files 2>/dev/null | wc -l | tr -d ' ') + local cat_x_matches=$(grep -r -E "$CATEGORY_X" $files 2>/dev/null) + local cat_x_count=$(echo "$cat_x_matches" | grep -v '^$' | wc -l | tr -d ' ') + if [[ $cat_x_count -ne 0 ]]; then - collect_error "Package '$package' contains $cat_x_count prohibited ASF Category X license(s)" - grep -r -E "$CATEGORY_X" $files + # Build detailed error message with license information + local error_details="Package '$package' contains $cat_x_count prohibited ASF Category X license(s):" + + # Extract and format each violation + while IFS= read -r match_line; do + 
if [[ -n "$match_line" ]]; then + # Parse file:content format + local file_name=$(echo "$match_line" | cut -d':' -f1) + local license_info=$(echo "$match_line" | cut -d':' -f2-) + + # Try to extract specific license name + local license_name=$(echo "$license_info" | grep -oE "$CATEGORY_X" | head -n1) + + error_details="${error_details}\n - File: ${file_name}\n License: ${license_name}\n Context: ${license_info}" + fi + done <<< "$cat_x_matches" + + collect_error "$error_details" has_error=1 else mark_check_passed @@ -416,7 +480,6 @@ check_license_categories() { local cat_b_count=$(grep -r -E "$CATEGORY_B" $files 2>/dev/null | wc -l | tr -d ' ') if [[ $cat_b_count -ne 0 ]]; then collect_warning "Package '$package' contains $cat_b_count ASF Category B license(s) - please verify documentation" - grep -r -E "$CATEGORY_B" $files else mark_check_passed fi @@ -511,7 +574,7 @@ check_binary_files() { undocumented_count=$((undocumented_count + 1)) has_error=1 fi - done < <(find . -type f -exec perl -lne 'print if -B' {} \; 2>/dev/null) + done < <(find . -type f 2>/dev/null | perl -lne 'print if -B $_') if [[ $binary_count -eq 0 ]]; then success "No binary files found" @@ -587,6 +650,7 @@ check_license_headers() { find_cmd="$find_cmd \\) 2>/dev/null" # Check each source file for ASF license header + local documented_count=0 while IFS= read -r source_file; do # Skip if file matches exclude patterns local should_exclude=0 @@ -607,13 +671,28 @@ check_license_headers() { # Check first 30 lines for Apache license header # Looking for the standard ASF license header text if ! 
head -n 30 "$source_file" | grep -q "Licensed to the Apache Software Foundation"; then - files_without_license+=("$source_file") + # No ASF header found - check if it's documented in LICENSE file as third-party code + local file_name=$(basename "$source_file") + local file_path_relative=$(echo "$source_file" | sed 's|^\./||') + + # Check if file name or path is mentioned in LICENSE file + if [[ -f "LICENSE" ]] && (grep -q "$file_name" LICENSE 2>/dev/null || grep -q "$file_path_relative" LICENSE 2>/dev/null); then + # File is documented in LICENSE as third-party code - this is allowed + documented_count=$((documented_count + 1)) + else + # Not documented - this is an error + files_without_license+=("$source_file") + fi fi done < <(eval "$find_cmd") # Report results info "Checked $total_checked source file(s) for ASF license headers (excluded $excluded_count generated/vendored files)" + if [[ $documented_count -gt 0 ]]; then + info "Found $documented_count source file(s) documented in LICENSE as third-party code (allowed)" + fi + if [[ ${#files_without_license[@]} -gt 0 ]]; then collect_error "Found ${#files_without_license[@]} source file(s) without ASF license headers:" @@ -632,11 +711,11 @@ check_license_headers() { fi echo "" - collect_error "All source files must include the Apache License header" + collect_error "All source files must include the Apache License header or be documented in LICENSE file" collect_error "You can use 'mvn apache-rat:check' for detailed license header analysis" return 1 else - success "All $total_checked source file(s) have ASF license headers" + success "All $total_checked source file(s) have ASF license headers or are documented in LICENSE" mark_check_passed return 0 fi @@ -648,30 +727,45 @@ check_version_consistency() { TOTAL_CHECKS=$((TOTAL_CHECKS + 1)) - info "Checking version consistency in pom.xml files..." 
+ # Skip version check for Python projects (hugegraph-ai) + if [[ "$package" =~ 'hugegraph-ai' ]]; then + info "Skipping version check for Python project: $package" + mark_check_passed + return 0 + fi + + info "Checking version consistency (revision property)..." - # Find inconsistent versions in pom.xml files - local inconsistent=() + # Find the parent/root pom.xml that defines the revision property + local root_pom="" + local revision_value="" + + # Look for pom.xml files that define the revision property while IFS= read -r pom_file; do - # Extract version tags (exclude parent versions and SNAPSHOT) - while IFS= read -r version_line; do - if [[ ! "$version_line" =~ "" ]] && \ - [[ ! "$version_line" =~ "SNAPSHOT" ]] && \ - [[ ! "$version_line" =~ "$expected_version" ]]; then - inconsistent+=("$pom_file: $version_line") - fi - done < <(grep "" "$pom_file" 2>/dev/null) + if grep -q "" "$pom_file" 2>/dev/null; then + # Extract the revision value + revision_value=$(grep "" "$pom_file" | head -1 | sed 's/.*\(.*\)<\/revision>.*/\1/') + root_pom="$pom_file" + break + fi done < <(find . -name "pom.xml" -type f 2>/dev/null) - if [[ ${#inconsistent[@]} -gt 0 ]]; then - collect_error "Found version inconsistencies in pom.xml files:" - printf ' %s\n' "${inconsistent[@]}" - return 1 - else - success "Version consistency check passed" + if [[ -z "$root_pom" ]]; then + collect_warning "No property found in pom.xml files - skipping version check" mark_check_passed + return 0 fi + info "Found revision property in $root_pom: $revision_value" + + # Check if revision matches expected version + if [[ "$revision_value" != "$expected_version" ]]; then + collect_error "Version mismatch: $revision_value in $root_pom (expected: $expected_version)" + return 1 + fi + + success "Version consistency check passed: revision=$revision_value" + mark_check_passed return 0 } @@ -686,7 +780,7 @@ check_notice_year() { local current_year=$(date +%Y) if ! 
grep -q "$current_year" NOTICE; then - collect_warning "NOTICE file may not contain current year ($current_year). Please verify copyright dates." + collect_warning "Package '$package': NOTICE file may not contain current year ($current_year). Please verify copyright dates." else mark_check_passed fi @@ -700,6 +794,9 @@ validate_source_package() { local package_file=$1 local package_dir=$(basename "$package_file" .tar.gz) + # Set current package context for error reporting + CURRENT_PACKAGE="$package_file" + info "Validating source package: $package_file" # Extract package @@ -708,6 +805,7 @@ validate_source_package() { if [[ ! -d "$package_dir" ]]; then collect_error "Failed to extract package: $package_file" + CURRENT_PACKAGE="" return 1 fi @@ -750,6 +848,9 @@ validate_source_package() { popd > /dev/null + # Clear package context + CURRENT_PACKAGE="" + info "Finished validating source package: $package_file" } @@ -757,6 +858,9 @@ validate_binary_package() { local package_file=$1 local package_dir=$(basename "$package_file" .tar.gz) + # Set current package context for error reporting + CURRENT_PACKAGE="$package_file" + info "Validating binary package: $package_file" # Extract package @@ -765,6 +869,7 @@ validate_binary_package() { if [[ ! 
-d "$package_dir" ]]; then collect_error "Failed to extract package: $package_file" + CURRENT_PACKAGE="" return 1 fi @@ -787,6 +892,9 @@ validate_binary_package() { popd > /dev/null + # Clear package context + CURRENT_PACKAGE="" + info "Finished validating binary package: $package_file" } @@ -821,26 +929,44 @@ cleanup() { echo " VALIDATION SUMMARY " echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" echo "" - echo "Total Checks: $TOTAL_CHECKS" - echo -e "${GREEN}Passed: $PASSED_CHECKS${NC}" - echo -e "${RED}Failed: $FAILED_CHECKS${NC}" - echo -e "${YELLOW}Warnings: ${#VALIDATION_WARNINGS[@]}${NC}" + + # Calculate execution time + local script_end_time=$(date +%s) + local execution_seconds=$((script_end_time - SCRIPT_START_TIME)) + local execution_minutes=$((execution_seconds / 60)) + local execution_seconds_remainder=$((execution_seconds % 60)) + + echo "Execution Time: ${execution_minutes}m ${execution_seconds_remainder}s" + echo "Total Checks: $TOTAL_CHECKS" + echo -e "${GREEN}Passed: $PASSED_CHECKS${NC}" + echo -e "${RED}Failed: $FAILED_CHECKS${NC}" + echo -e "${YELLOW}Warnings: ${#VALIDATION_WARNINGS[@]}${NC}" echo "" if [[ ${#VALIDATION_ERRORS[@]} -gt 0 ]]; then - echo -e "${RED}━━━ ERRORS ━━━${NC}" + echo -e "${RED}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${RED} ERRORS ${NC}" + echo -e "${RED}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + local err_index=1 for err in "${VALIDATION_ERRORS[@]}"; do - echo -e "${RED} ✗ $err${NC}" + echo -e "${RED}[E${err_index}] $err${NC}" + echo "" # Blank line between errors for readability + err_index=$((err_index + 1)) done - echo "" fi if [[ ${#VALIDATION_WARNINGS[@]} -gt 0 ]]; then - echo -e "${YELLOW}━━━ WARNINGS ━━━${NC}" + echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${YELLOW} WARNINGS ${NC}" + echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo 
"" + local warn_index=1 for warn in "${VALIDATION_WARNINGS[@]}"; do - echo -e "${YELLOW} ⚠ $warn${NC}" + echo -e "${YELLOW}[W${warn_index}] $warn${NC}" + echo "" # Blank line between warnings for readability + warn_index=$((warn_index + 1)) done - echo "" fi echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" @@ -871,6 +997,9 @@ trap 'echo -e "${RED}Script interrupted${NC}"; exit 130' INT TERM ################################################################################ main() { + # Record script start time + SCRIPT_START_TIME=$(date +%s) + # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in From af87f8a3eacf251e5887836d927210195247cb3d Mon Sep 17 00:00:00 2001 From: imbajin Date: Sat, 15 Nov 2025 04:13:26 +0800 Subject: [PATCH 06/19] fix: add JSON to CATEGORY_X license validation The JSON license was added to the CATEGORY_X regex in the binary package validation step to ensure packages with this license are properly flagged during release validation. --- .github/workflows/validate-release.yml | 2 +- dist/README.md | 204 ++++++++++++++++++++----- 2 files changed, 164 insertions(+), 42 deletions(-) diff --git a/.github/workflows/validate-release.yml b/.github/workflows/validate-release.yml index bbd1506f2..6b7cb043c 100644 --- a/.github/workflows/validate-release.yml +++ b/.github/workflows/validate-release.yml @@ -244,7 +244,7 @@ jobs: - name: 7. 
Validate Binary Packages run: | cd dist/${{ inputs.release_version }} || exit - CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial" + CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON" for i in *.tar.gz; do if [[ "$i" == *-src.tar.gz ]]; then # skip source packages diff --git a/dist/README.md b/dist/README.md index b4d43d8ef..0ccdb8ce4 100644 --- a/dist/README.md +++ b/dist/README.md @@ -10,15 +10,18 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。 - ✅ **自动依赖检查** - 验证所有必需工具(svn、gpg、java、maven 等) - ✅ **SHA512 和 GPG 签名验证** - 确保包的完整性和真实性 -- ✅ **许可证合规性验证** - 检查禁止的 ASF Category X 和 B 类许可证 +- ✅ **许可证合规性验证** - 检查禁止的 ASF Category X 和需要文档化的 Category B 许可证 +- ✅ **详细的许可证错误报告** - 对 Category X 违规显示文件路径、许可证名称和上下文 - ✅ **包内容验证** - 验证必需文件(LICENSE、NOTICE、DISCLAIMER) -- ✅ **ASF 许可证头检查** - 验证所有源文件中的许可证头(Java、Python、Go、Shell 等) -- ✅ **版本一致性验证** - 确保 pom.xml 版本与预期发布版本匹配 +- ✅ **ASF 许可证头检查** - 验证所有源文件中的许可证头,支持第三方代码文档化 +- ✅ **版本一致性验证** - 验证 Maven `` 属性与预期发布版本匹配 +- ✅ **多语言项目支持** - 自动跳过 Python 项目(hugegraph-ai)的 Maven 版本检查 - ✅ **源码包编译** - 编译源码包以验证构建正确性 - ✅ **运行时测试** - 测试服务器和工具链(loader、tool、hubble)功能 -- ✅ **进度跟踪** - 显示实时进度和分步指示器 +- ✅ **智能进度跟踪** - 显示实时进度、步骤指示器和执行时间 +- ✅ **上下文化错误报告** - 错误和警告包含步骤、包名和索引编号 - ✅ **详细日志记录** - 将所有输出保存到带时间戳的日志文件 -- ✅ **全面的错误报告** - 收集所有错误并在最后显示摘要 +- ✅ **全面的错误摘要** - 收集所有错误并在最后显示格式化摘要 ## 环境要求 @@ -27,7 +30,7 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。 - svn(Subversion 客户端) - gpg(用于签名验证的 GnuPG) - wget 或 curl -- 标准 Unix 工具(bash、find、grep、awk 等) +- 标准 Unix 工具(bash、find、grep、awk、perl 等) 脚本会自动检查所有依赖项,如果缺少任何内容会提供安装说明。 @@ -78,25 +81,26 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。 脚本执行以下 9 个验证步骤: -1. **检查依赖项** - 验证所有必需工具已安装 +1. **检查依赖项** - 验证所有必需工具已安装并显示版本信息 2. **准备发布文件** - 从 Apache SVN 下载或使用本地目录 -3. **导入并信任 GPG 密钥** - 导入 KEYS 文件并信任发布管理员的 GPG 密钥 +3. 
**导入并信任 GPG 密钥** - 导入 KEYS 文件并信任所有公钥 4. **验证 SHA512 和 GPG 签名** - 验证所有包的校验和和签名 5. **验证源码包** - 对源码包进行全面检查: - 包命名(包含 "incubating") - 必需文件(LICENSE、NOTICE、DISCLAIMER) - - 许可证合规性(无 Category X,已记录 Category B) + - 许可证合规性(禁止 Category X,记录 Category B) + - 详细的许可证违规报告(文件路径、许可证名称、上下文) - 无空文件或目录 - 文件大小限制(无文件 > 800KB) - - 二进制文件文档 - - 所有源文件中的许可证头 - - pom.xml 文件之间的版本一致性 + - 二进制文件文档化(在 LICENSE 中声明) + - 所有源文件的许可证头(支持第三方代码文档化) + - Maven `` 属性版本一致性(跳过 Python 项目) - NOTICE 文件版权年份 - - 源码编译 -6. **测试编译的服务器** - 启动并测试编译的 HugeGraph 服务器 + - 源码编译测试 +6. **测试编译的服务器** - 初始化并启动编译的 HugeGraph 服务器 7. **测试编译的工具链** - 从编译包测试 loader、tool 和 hubble -8. **验证二进制包** - 检查二进制包的必需文件和结构 -9. **测试二进制包** - 从二进制包测试服务器和工具链 +8. **验证二进制包** - 检查二进制包的必需文件、licenses 目录和许可证合规性 +9. **测试二进制包** - 从二进制包测试服务器和工具链功能 ## 输出结果 @@ -109,20 +113,21 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。 Apache HugeGraph Release Validation v2.0.0 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - 版本: 1.7.0 - 用户: pengjunzhi + Version: 1.7.0 + User: pengjunzhi Java: 11 - 模式: SVN 下载 - 日志: logs/validate-1.7.0-20251115-021742.log + Mode: SVN Download + Log: logs/validate-1.7.0-20251115-021742.log ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -步骤 [1/9]: 检查依赖项 +Step [1/9]: Check Dependencies ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ ✓ svn: version 1.14.1 ✓ gpg: gpg (GnuPG) 2.2.41 ✓ java: 11.0.21 +✓ mvn: Apache Maven 3.9.5 ... 
``` @@ -139,42 +144,127 @@ Apache HugeGraph (Incubating) 发布包的自动化验证脚本。 ### 最终摘要 -验证结束时,会显示一个全面的摘要: +验证结束时,会显示一个全面的摘要,包含执行时间和详细的错误/警告信息: ``` ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ - 验证摘要 + VALIDATION SUMMARY ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -总检查数: 127 -通过: 125 -失败: 2 -警告: 3 +Execution Time: 6m 34s +Total Checks: 139 +Passed: 134 +Failed: 3 +Warnings: 2 -━━━ 错误 ━━━ - ✗ 包 'xyz' 缺少 LICENSE 文件 - ✗ 二进制文件 'logo.png' 未在 LICENSE 中记录 +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + ERRORS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -━━━ 警告 ━━━ - ⚠ NOTICE 文件可能不包含当前年份 (2025) +[E1] [Step 8: Validate Binary Packages] [xxxx] contains 1 prohibited ASF Category X license(s): +xxxxx ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + WARNINGS +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +xxxx +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +VALIDATION FAILED +Log file: logs/validate-1.7.0-20251115-021742.log +``` -验证失败 -日志文件: logs/validate-1.7.0-20251115-021742.log +## 许可证检查说明 + +### Category X 许可证(禁止使用) + +脚本会严格检查以下 ASF Category X 许可证,发现后会报错并提供详细信息: + +- GPL, LGPL 系列 +- Sleepycat License +- BSD-4-Clause +- BCL (Binary Code License) +- JSR-275 +- Amazon Software License +- RSAL (Reciprocal Public License) +- QPL (Q Public License) +- SSPL (Server Side Public License) +- CPOL (Code Project Open License) +- NPL1 (Netscape Public License) +- Creative Commons Non-Commercial +- **JSON.org** (JSON License) + +**错误报告格式:** +``` +Package 'xxx.tar.gz' contains 1 prohibited ASF Category X license(s): + - File: licenses/LICENSE-json.txt + License: JSON.org + Context: Copyright (c) 2002 JSON.org ``` +### Category B 许可证(需要文档化) + +以下许可证会触发警告,提醒检查是否在 LICENSE 文件中正确记录: + +- CDDL1, CPL, EPL, IPL, MPL, SPL +- OSL-3.0 +- UnRAR License +- Erlang Public License +- OFL (SIL Open Font License) +- Ubuntu Font License Version 1.0 +- IPA Font License Agreement v1.0 +- EPL2.0 +- CC-BY 
(Creative Commons Attribution) + +**警告报告格式(简洁):** +``` +Package 'xxx.tar.gz' contains 2 ASF Category B license(s) - please verify documentation +``` + +### 许可证头检查 + +脚本会检查所有源代码文件(Java、Shell、Python、Go、JavaScript、TypeScript、C/C++、Scala、Groovy、Rust、Kotlin、Proto 等)是否包含 ASF 许可证头。 + +**第三方代码处理:** +- 如果源文件没有 ASF 许可证头,脚本会检查该文件是否在 LICENSE 文件中被文档化 +- 支持通过文件名或相对路径匹配 +- 已文档化的第三方代码会被标记为合法并单独统计 +- 只有未文档化且缺少 ASF 头的文件才会报错 + ## 错误处理 脚本使用**"继续并报告"**方式: - 不会在第一个错误时退出 -- 收集所有验证错误 -- 在最后显示全面摘要 +- 收集所有验证错误和警告 +- 在最后显示全面摘要,包含: + - 执行总时间 + - 检查统计(总数、通过、失败、警告) + - 带编号和上下文的错误列表 + - 带编号和上下文的警告列表 - 退出码 0 = 所有检查通过 - 退出码 1 = 一个或多个检查失败 -这允许你一次看到所有问题,而不是逐个修复。 +每个错误和警告都包含: +- 编号索引([E1], [E2], [W1], [W2] 等) +- 步骤上下文(哪个验证步骤) +- 包名上下文(哪个包) +- 详细的错误描述 + +这允许你一次看到所有问题,并能快速定位到具体的失败点。 + +## 特殊处理 + +### Python 项目(hugegraph-ai) + +- 自动跳过编译步骤 +- 自动跳过 Maven `` 版本检查 +- 仍然执行其他所有验证(许可证、文件结构等) + +### Computer 模块 + +- 在特殊目录结构下编译(`cd computer && mvn package`) +- 支持 Java 8 和 Java 11 ## 故障排除 @@ -189,9 +279,6 @@ java -version # 使用 JAVA_HOME 指定 Java 11 export JAVA_HOME=/path/to/java11 export PATH=$JAVA_HOME/bin:$PATH - -# 或使用 jenv 切换 Java 版本 -jenv global 11 ``` ### GPG 密钥问题 @@ -206,6 +293,11 @@ gpg --import KEYS # 信任特定密钥 gpg --edit-key # 在 GPG 提示符中,输入: trust, 然后 5, 然后 y, 然后 quit + +# 或者信任所有导入的密钥 +for key in $(gpg --no-tty --list-keys --with-colons | awk -F: '/^pub/ {print $5}'); do + echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$key" trust +done ``` ### 权限被拒绝 @@ -216,10 +308,41 @@ gpg --edit-key chmod +x validate-release.sh ``` +### 许可证检查误报 + +如果合法的第三方代码被标记为缺少许可证头: + +1. 确保在根目录的 `LICENSE` 文件中记录了该文件 +2. 记录格式可以是文件名或相对路径 +3. 重新运行验证脚本 + +示例 LICENSE 文件条目: +``` +This product bundles ThirdParty.java from XYZ project, +which is available under a "MIT License". 
+For details, see licenses/LICENSE-mit.txt +``` + +### 查看详细日志 + +如果需要更多调试信息: + +```bash +# 查看完整日志 +cat logs/validate--.log + +# 搜索特定错误 +grep "ERROR" logs/validate-*.log + +# 查看特定步骤 +grep "Step \[5/9\]" logs/validate-*.log +``` + ## 参考文档 - [Apache 发布政策](https://www.apache.org/legal/release-policy.html) - [孵化器发布检查清单](https://cwiki.apache.org/confluence/display/INCUBATOR/Incubator+Release+Checklist) +- [Apache 许可证分类](https://www.apache.org/legal/resolved.html) - [HugeGraph 验证发布指南](../content/cn/docs/contribution-guidelines/validate-release.md) ## 贡献 @@ -228,4 +351,3 @@ chmod +x validate-release.sh 1. 查看现有问题:https://github.com/apache/incubator-hugegraph-doc/issues 2. 提交新问题或 pull request - From 512b51cfce2dc24ad1fb2f86c6d5dd473b950f86 Mon Sep 17 00:00:00 2001 From: Peng Junzhi <201250214@smail.nju.edu.cn> Date: Sat, 15 Nov 2025 16:51:16 +0800 Subject: [PATCH 07/19] introduce new version of validation --- .github/workflows/validate-release-new.yml | 805 +++++++++++++++++++++ 1 file changed, 805 insertions(+) create mode 100644 .github/workflows/validate-release-new.yml diff --git a/.github/workflows/validate-release-new.yml b/.github/workflows/validate-release-new.yml new file mode 100644 index 000000000..08d678d11 --- /dev/null +++ b/.github/workflows/validate-release-new.yml @@ -0,0 +1,805 @@ +name: "Validate Apache Release (New)" + +on: + workflow_dispatch: + inputs: + release_version: + required: true + description: svn release version + default: '1.7.0' + gpg_user: + required: true + description: current release manager (gpg username) + default: 'pengjunzhi' + java_version: + required: false + description: Java version to validate + default: '11' + type: choice + options: + - '11' + - '17' + + push: + branches: + - 'release-*' + pull_request: + branches: + - 'release-*' + +jobs: + validate: + name: "Validate Release On ${{ matrix.os }} (java-${{ matrix.java_version }})" + runs-on: ${{ matrix.os }} + env: + RELEASE_VERSION: ${{ inputs.release_version || '1.7.0' }} + 
GPG_USER: ${{ inputs.gpg_user || 'pengjunzhi' }} + JAVA_VERSION: ${{ inputs.java_version || matrix.java_version || '11' }} + SVN_URL_PREFIX: https://dist.apache.org/repos/dist/dev/incubator/hugegraph + KEYS_URL: https://downloads.apache.org/incubator/hugegraph/KEYS + MAX_FILE_SIZE: 800k + SERVER_START_DELAY: 3 + # License Patterns (ASF Category X - Prohibited) + CATEGORY_X: '\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org' + # License Patterns (ASF Category B - Must be documented) + CATEGORY_B: '\bCDDL1|\bCPL|\bEPL|\bIPL|\bMPL|\bSPL|OSL-3.0|UnRAR License|Erlang Public License|\bOFL\b|Ubuntu Font License Version 1.0|IPA Font License Agreement v1.0|EPL2.0|CC-BY' + steps: + - name: Checkout source + uses: actions/checkout@v4 + + - name: Install JDK ${{ env.JAVA_VERSION }} + uses: actions/setup-java@v3 + with: + java-version: ${{ env.JAVA_VERSION }} + distribution: 'adopt' + + - name: Install dependencies + run: | + if [[ "${{ runner.os }}" == "macOS" ]]; then + brew install svn wget perl + elif [[ "${{ runner.os }}" == "Linux" ]]; then + sudo apt-get update + sudo apt-get install -y subversion wget perl + fi + # Verify all required commands + for cmd in svn gpg shasum mvn java wget tar curl awk grep find perl; do + if ! 
command -v "$cmd" &> /dev/null; then + echo "Error: Missing required dependency: $cmd" + exit 1 + fi + echo "✓ $cmd: $(command -v $cmd)" + done + + - name: Cache Maven packages + uses: actions/cache@v3 + with: + path: ~/.m2 + key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} + restore-keys: ${{ runner.os }}-m2 + + - name: Step 1 - Check Dependencies + run: | + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [1/9]: Check Dependencies" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + # Check Java version + CURRENT_JAVA=$(java -version 2>&1 | head -n 1 | awk -F '"' '{print $2}' | awk -F '.' '{print $1}') + echo "Current Java version: $CURRENT_JAVA (Required: ${{ env.JAVA_VERSION }})" + if [[ "$CURRENT_JAVA" != "${{ env.JAVA_VERSION }}" ]]; then + echo "Error: Java version mismatch! Current: Java $CURRENT_JAVA, Required: Java ${{ env.JAVA_VERSION }}" + exit 1 + fi + echo "✓ Java version check passed: Java $CURRENT_JAVA" + + - name: Step 2 - Prepare Release Files + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [2/9]: Prepare Release Files" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + DIST_DIR="dist/${{ env.RELEASE_VERSION }}" + echo "Downloading from SVN to: ${DIST_DIR}" + + rm -rf "${DIST_DIR}" + mkdir -p "${DIST_DIR}" + + if ! svn co "${SVN_URL_PREFIX}/${{ env.RELEASE_VERSION }}" "${DIST_DIR}"; then + echo "Error: Failed to download from SVN: ${SVN_URL_PREFIX}/${{ env.RELEASE_VERSION }}" + exit 1 + fi + + echo "✓ Downloaded release files from SVN" + cd "${DIST_DIR}" + ls -lh + + - name: Step 3 - Import & Trust GPG Keys + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [3/9]: Import & Trust GPG Keys" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + echo "Downloading KEYS file from ${KEYS_URL}..." + if ! 
wget -q "${KEYS_URL}" -O KEYS; then + echo "Error: Failed to download KEYS file from ${KEYS_URL}" + exit 1 + fi + echo "✓ KEYS file downloaded" + + echo "Importing GPG keys..." + IMPORT_OUTPUT=$(gpg --import KEYS 2>&1) + IMPORTED_COUNT=$(echo "$IMPORT_OUTPUT" | grep -c "imported" || echo "0") + + if [[ "$IMPORTED_COUNT" == "0" ]]; then + echo "⚠ No new keys imported (may already exist in keyring)" + else + echo "✓ Imported GPG keys" + fi + + # Trust specific user key + if ! gpg --list-keys "${{ env.GPG_USER }}" &>/dev/null; then + echo "Error: User '${{ env.GPG_USER }}' key not found in imported keys. Please verify the username." + exit 1 + fi + + echo "Trusting GPG key for user: ${{ env.GPG_USER }}" + echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "${{ env.GPG_USER }}" trust 2>/dev/null + echo "✓ Trusted key for ${{ env.GPG_USER }}" + + # Trust all imported keys + echo "Trusting all imported public keys..." + TRUSTED=0 + for key in $(gpg --no-tty --list-keys --with-colons | awk -F: '/^pub/ {print $5}'); do + echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$key" trust 2>/dev/null + TRUSTED=$((TRUSTED + 1)) + done + echo "✓ Trusted $TRUSTED GPG keys" + + - name: Step 4 - Verify SHA512 & GPG Signatures + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [4/9]: Verify SHA512 & GPG Signatures" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + PACKAGE_COUNT=0 + for pkg in *.tar.gz; do + if [[ -f "$pkg" ]]; then + PACKAGE_COUNT=$((PACKAGE_COUNT + 1)) + fi + done + + CURRENT=0 + for pkg in *.tar.gz; do + if [[ ! 
-f "$pkg" ]]; then + continue + fi + CURRENT=$((CURRENT + 1)) + echo " [${CURRENT}/${PACKAGE_COUNT}] $pkg" + + # Check SHA512 + if shasum -a 512 --check "${pkg}.sha512"; then + echo " ✓ SHA512 verified: $pkg" + else + echo " ✗ SHA512 verification failed: $pkg" + exit 1 + fi + + # Check GPG signature + if gpg --verify "${pkg}.asc" "$pkg" 2>&1 | grep -q "Good signature"; then + echo " ✓ GPG signature verified: $pkg" + else + echo " ✗ GPG signature verification failed: $pkg" + exit 1 + fi + done + + - name: Step 5 - Validate Source Packages + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [5/9]: Validate Source Packages" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + SRC_PACKAGES=() + for pkg in *-src.tar.gz; do + if [[ -f "$pkg" ]]; then + SRC_PACKAGES+=("$pkg") + fi + done + + echo "Found ${#SRC_PACKAGES[@]} source package(s)" + + for src_pkg in "${SRC_PACKAGES[@]}"; do + echo "" + echo "Validating source package: $src_pkg" + + # Extract package + PACKAGE_DIR=$(basename "$src_pkg" .tar.gz) + rm -rf "$PACKAGE_DIR" + tar -xzf "$src_pkg" + + if [[ ! -d "$PACKAGE_DIR" ]]; then + echo "Error: Failed to extract package: $src_pkg" + exit 1 + fi + + pushd "$PACKAGE_DIR" + + # 5.1: Check incubating name + if [[ ! "$src_pkg" =~ "incubating" ]]; then + echo "Error: Package name '$src_pkg' should include 'incubating'" + exit 1 + fi + echo " ✓ Package name includes 'incubating'" + + # 5.2: Check required files + if [[ ! -f "LICENSE" ]]; then + echo "Error: Package '$src_pkg' missing LICENSE file" + exit 1 + fi + echo " ✓ LICENSE file exists" + + if [[ ! -f "NOTICE" ]]; then + echo "Error: Package '$src_pkg' missing NOTICE file" + exit 1 + fi + echo " ✓ NOTICE file exists" + + if [[ ! 
-f "DISCLAIMER" ]]; then + echo "Error: Package '$src_pkg' missing DISCLAIMER file" + exit 1 + fi + echo " ✓ DISCLAIMER file exists" + + # 5.3: Check license categories (Category X - Prohibited) + CAT_X_MATCHES=$(grep -r -E "${{ env.CATEGORY_X }}" LICENSE NOTICE 2>/dev/null || true) + CAT_X_COUNT=$(echo "$CAT_X_MATCHES" | grep -v '^$' | wc -l | tr -d ' ') + + if [[ $CAT_X_COUNT -ne 0 ]]; then + echo "Error: Package '$src_pkg' contains $CAT_X_COUNT prohibited ASF Category X license(s):" + echo "$CAT_X_MATCHES" + exit 1 + fi + echo " ✓ No Category X licenses found" + + # 5.4: Check license categories (Category B - Warning) + CAT_B_COUNT=$(grep -r -E "${{ env.CATEGORY_B }}" LICENSE NOTICE 2>/dev/null | wc -l | tr -d ' ' || echo "0") + if [[ $CAT_B_COUNT -ne 0 ]]; then + echo " ⚠ Warning: Package '$src_pkg' contains $CAT_B_COUNT ASF Category B license(s) - please verify documentation" + else + echo " ✓ No Category B licenses found" + fi + + # 5.5: Check empty files and directories + EMPTY_DIRS=$(find . -type d -empty 2>/dev/null || true) + EMPTY_FILES=$(find . -type f -empty 2>/dev/null || true) + + if [[ -n "$EMPTY_DIRS" ]]; then + echo "Error: Package '$src_pkg' contains empty director(y/ies):" + echo "$EMPTY_DIRS" + exit 1 + fi + + if [[ -n "$EMPTY_FILES" ]]; then + echo "Error: Package '$src_pkg' contains empty file(s):" + echo "$EMPTY_FILES" + exit 1 + fi + echo " ✓ No empty files or directories" + + # 5.6: Check file sizes + LARGE_FILES=$(find . -type f -size "+${{ env.MAX_FILE_SIZE }}" 2>/dev/null || true) + if [[ -n "$LARGE_FILES" ]]; then + echo "Error: Package '$src_pkg' contains file(s) larger than ${{ env.MAX_FILE_SIZE }}:" + echo "$LARGE_FILES" + exit 1 + fi + echo " ✓ All files are within size limit" + + # 5.7: Check binary files + BINARY_COUNT=0 + UNDOCUMENTED_COUNT=0 + while IFS= read -r binary_file; do + BINARY_COUNT=$((BINARY_COUNT + 1)) + FILE_NAME=$(basename "$binary_file") + if ! 
grep -q "$FILE_NAME" LICENSE 2>/dev/null; then + echo "Error: Undocumented binary file: $binary_file" + UNDOCUMENTED_COUNT=$((UNDOCUMENTED_COUNT + 1)) + fi + done < <(find . -type f 2>/dev/null | perl -lne 'print if -B $_' || true) + + if [[ $BINARY_COUNT -eq 0 ]]; then + echo " ✓ No binary files found" + elif [[ $UNDOCUMENTED_COUNT -eq 0 ]]; then + echo " ✓ All $BINARY_COUNT binary file(s) are documented" + else + echo "Error: Found $UNDOCUMENTED_COUNT undocumented binary file(s)" + exit 1 + fi + + # 5.8: Check license headers in source files + echo " Checking for ASF license headers in source files..." + + # Define file patterns to check + FILE_PATTERNS=("*.java" "*.sh" "*.py" "*.go" "*.js" "*.ts" "*.jsx" "*.tsx" "*.c" "*.h" "*.cpp" "*.cc" "*.cxx" "*.hpp" "*.scala" "*.groovy" "*.gradle" "*.rs" "*.kt" "*.proto") + + # Files to exclude + EXCLUDE_PATTERNS=("*.min.js" "*.min.css" "*node_modules*" "*target*" "*build*" "*.pb.go" "*generated*" "*third_party*" "*vendor*") + + FILES_WITHOUT_LICENSE=() + TOTAL_CHECKED=0 + EXCLUDED_COUNT=0 + DOCUMENTED_COUNT=0 + + # Build find command + FIND_CMD="find . -type f \\(" + FIRST=1 + for pattern in "${FILE_PATTERNS[@]}"; do + if [[ $FIRST -eq 1 ]]; then + FIND_CMD="$FIND_CMD -name \"$pattern\"" + FIRST=0 + else + FIND_CMD="$FIND_CMD -o -name \"$pattern\"" + fi + done + FIND_CMD="$FIND_CMD \\) 2>/dev/null" + + # Check each source file + while IFS= read -r source_file; do + # Skip if file matches exclude patterns + SHOULD_EXCLUDE=0 + for exclude_pattern in "${EXCLUDE_PATTERNS[@]}"; do + if [[ "$source_file" == $exclude_pattern ]]; then + SHOULD_EXCLUDE=1 + EXCLUDED_COUNT=$((EXCLUDED_COUNT + 1)) + break + fi + done + + if [[ $SHOULD_EXCLUDE -eq 1 ]]; then + continue + fi + + TOTAL_CHECKED=$((TOTAL_CHECKED + 1)) + + # Check first 30 lines for Apache license header + if ! 
head -n 30 "$source_file" | grep -q "Licensed to the Apache Software Foundation"; then + # Check if documented in LICENSE file + FILE_NAME=$(basename "$source_file") + FILE_PATH_RELATIVE=$(echo "$source_file" | sed 's|^\./||') + + if [[ -f "LICENSE" ]] && (grep -q "$FILE_NAME" LICENSE 2>/dev/null || grep -q "$FILE_PATH_RELATIVE" LICENSE 2>/dev/null); then + DOCUMENTED_COUNT=$((DOCUMENTED_COUNT + 1)) + else + FILES_WITHOUT_LICENSE+=("$source_file") + fi + fi + done < <(eval "$FIND_CMD") + + echo " Checked $TOTAL_CHECKED source file(s) for ASF license headers (excluded $EXCLUDED_COUNT generated/vendored files)" + + if [[ $DOCUMENTED_COUNT -gt 0 ]]; then + echo " Found $DOCUMENTED_COUNT source file(s) documented in LICENSE as third-party code (allowed)" + fi + + if [[ ${#FILES_WITHOUT_LICENSE[@]} -gt 0 ]]; then + echo "Error: Found ${#FILES_WITHOUT_LICENSE[@]} source file(s) without ASF license headers:" + SHOW_COUNT=${#FILES_WITHOUT_LICENSE[@]} + if [[ $SHOW_COUNT -gt 20 ]]; then + SHOW_COUNT=20 + fi + for ((i=0; i" "$pom_file" 2>/dev/null; then + REVISION_VALUE=$(grep "" "$pom_file" | head -1 | sed 's/.*\(.*\)<\/revision>.*/\1/') + ROOT_POM="$pom_file" + break + fi + done < <(find . -name "pom.xml" -type f 2>/dev/null) + + if [[ -n "$ROOT_POM" ]]; then + echo " Found revision property in $ROOT_POM: $REVISION_VALUE" + if [[ "$REVISION_VALUE" != "${{ env.RELEASE_VERSION }}" ]]; then + echo "Error: Version mismatch: $REVISION_VALUE in $ROOT_POM (expected: ${{ env.RELEASE_VERSION }})" + exit 1 + fi + echo " ✓ Version consistency check passed: revision=$REVISION_VALUE" + else + echo " ⚠ Warning: No property found in pom.xml files - skipping version check" + fi + else + echo " Skipping version check for Python project: $src_pkg" + fi + + # 5.10: Check NOTICE year + if [[ -f "NOTICE" ]]; then + CURRENT_YEAR=$(date +%Y) + if ! grep -q "$CURRENT_YEAR" NOTICE; then + echo " ⚠ Warning: NOTICE file may not contain current year ($CURRENT_YEAR). Please verify copyright dates." 
+ else + echo " ✓ NOTICE file contains current year" + fi + fi + + # 5.11: Compile source package + echo " Compiling source package: $src_pkg" + + if [[ "$src_pkg" =~ 'hugegraph-ai' ]]; then + echo " ⚠ Skipping compilation for AI module (not required)" + elif [[ "$src_pkg" =~ "hugegraph-computer" ]]; then + if cd computer 2>/dev/null && mvn clean package -DskipTests -Dcheckstyle.skip=true -ntp -e; then + echo " ✓ Compilation successful: $src_pkg" + else + echo "Error: Compilation failed: $src_pkg" + exit 1 + fi + cd .. + else + if mvn clean package -DskipTests -Dcheckstyle.skip=true -ntp -e; then + echo " ✓ Compilation successful: $src_pkg" + else + echo "Error: Compilation failed: $src_pkg" + exit 1 + fi + fi + + popd + echo "✓ Finished validating source package: $src_pkg" + done + + - name: Step 6 - Test Compiled Server Package + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [6/9]: Test Compiled Server Package" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + # Find server directory + SERVER_DIR=$(find . 
-maxdepth 3 -type d -path "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) + + if [[ -z "$SERVER_DIR" ]]; then + echo "Error: Could not find compiled server directory" + exit 1 + fi + + echo "Starting HugeGraph server from: $SERVER_DIR" + pushd "$SERVER_DIR" + + if bin/init-store.sh; then + echo " ✓ Store initialized" + else + echo "Error: Failed to initialize store" + exit 1 + fi + + sleep ${{ env.SERVER_START_DELAY }} + + if bin/start-hugegraph.sh; then + echo " ✓ Server started" + else + echo "Error: Failed to start server" + exit 1 + fi + + popd + + - name: Step 7 - Test Compiled Toolchain Packages + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [7/9]: Test Compiled Toolchain Packages" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + TOOLCHAIN_SRC=$(find . -maxdepth 3 -type d -path "*toolchain*src" 2>/dev/null | head -n1) + + if [[ -n "$TOOLCHAIN_SRC" ]]; then + pushd "$TOOLCHAIN_SRC" + + TOOLCHAIN_DIR=$(find . -maxdepth 1 -type d -name "*toolchain*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$TOOLCHAIN_DIR" ]]; then + pushd "$TOOLCHAIN_DIR" + + # Test Loader + echo "Testing HugeGraph Loader..." + LOADER_DIR=$(find . -maxdepth 1 -type d -name "*loader*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$LOADER_DIR" ]]; then + pushd "$LOADER_DIR" + if bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph; then + echo " ✓ Loader test passed" + else + echo "Error: Loader test failed" + exit 1 + fi + popd + fi + + # Test Tool + echo "Testing HugeGraph Tool..." + TOOL_DIR=$(find . 
-maxdepth 1 -type d -name "*tool*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$TOOL_DIR" ]]; then + pushd "$TOOL_DIR" + if bin/hugegraph gremlin-execute --script 'g.V().count()' && \ + bin/hugegraph task-list && \ + bin/hugegraph backup -t all --directory ./backup-test; then + echo " ✓ Tool test passed" + else + echo "Error: Tool test failed" + exit 1 + fi + popd + fi + + # Test Hubble + echo "Testing HugeGraph Hubble..." + HUBBLE_DIR=$(find . -maxdepth 1 -type d -name "*hubble*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$HUBBLE_DIR" ]]; then + pushd "$HUBBLE_DIR" + if bin/start-hubble.sh; then + echo " ✓ Hubble started" + sleep 2 + bin/stop-hubble.sh + echo " ✓ Hubble stopped" + else + echo "Error: Hubble test failed" + exit 1 + fi + popd + fi + + popd + fi + + popd + fi + + # Stop server after toolchain tests + SERVER_DIR=$(find . -maxdepth 3 -type d -path "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) + if [[ -n "$SERVER_DIR" ]]; then + echo "Stopping server..." + pushd "$SERVER_DIR" + bin/stop-hugegraph.sh + echo " ✓ Server stopped" + popd + fi + + - name: Step 8 - Validate Binary Packages + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [8/9]: Validate Binary Packages" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + BIN_PACKAGES=() + for pkg in *.tar.gz; do + if [[ "$pkg" != *-src.tar.gz ]] && [[ -f "$pkg" ]]; then + BIN_PACKAGES+=("$pkg") + fi + done + + echo "Found ${#BIN_PACKAGES[@]} binary package(s)" + + for bin_pkg in "${BIN_PACKAGES[@]}"; do + echo "" + echo "Validating binary package: $bin_pkg" + + # Extract package + PACKAGE_DIR=$(basename "$bin_pkg" .tar.gz) + rm -rf "$PACKAGE_DIR" + tar -xzf "$bin_pkg" + + if [[ ! 
-d "$PACKAGE_DIR" ]]; then + echo "Error: Failed to extract package: $bin_pkg" + exit 1 + fi + + pushd "$PACKAGE_DIR" + + # 8.1: Check incubating name + if [[ ! "$bin_pkg" =~ "incubating" ]]; then + echo "Error: Package name '$bin_pkg' should include 'incubating'" + exit 1 + fi + echo " ✓ Package name includes 'incubating'" + + # 8.2: Check required files + if [[ ! -f "LICENSE" ]]; then + echo "Error: Package '$bin_pkg' missing LICENSE file" + exit 1 + fi + echo " ✓ LICENSE file exists" + + if [[ ! -f "NOTICE" ]]; then + echo "Error: Package '$bin_pkg' missing NOTICE file" + exit 1 + fi + echo " ✓ NOTICE file exists" + + if [[ ! -f "DISCLAIMER" ]]; then + echo "Error: Package '$bin_pkg' missing DISCLAIMER file" + exit 1 + fi + echo " ✓ DISCLAIMER file exists" + + # 8.3: Check licenses directory + if [[ ! -d "licenses" ]]; then + echo "Error: Package '$bin_pkg' missing licenses directory" + exit 1 + fi + echo " ✓ licenses directory exists" + + # 8.4: Check license categories (Category X - Prohibited) + CAT_X_MATCHES=$(grep -r -E "${{ env.CATEGORY_X }}" LICENSE NOTICE licenses 2>/dev/null || true) + CAT_X_COUNT=$(echo "$CAT_X_MATCHES" | grep -v '^$' | wc -l | tr -d ' ') + + if [[ $CAT_X_COUNT -ne 0 ]]; then + echo "Error: Package '$bin_pkg' contains $CAT_X_COUNT prohibited ASF Category X license(s):" + echo "$CAT_X_MATCHES" + exit 1 + fi + echo " ✓ No Category X licenses found" + + # 8.5: Check license categories (Category B - Warning) + CAT_B_COUNT=$(grep -r -E "${{ env.CATEGORY_B }}" LICENSE NOTICE licenses 2>/dev/null | wc -l | tr -d ' ' || echo "0") + if [[ $CAT_B_COUNT -ne 0 ]]; then + echo " ⚠ Warning: Package '$bin_pkg' contains $CAT_B_COUNT ASF Category B license(s) - please verify documentation" + else + echo " ✓ No Category B licenses found" + fi + + # 8.6: Check empty files and directories + EMPTY_DIRS=$(find . -type d -empty 2>/dev/null || true) + EMPTY_FILES=$(find . 
-type f -empty 2>/dev/null || true) + + if [[ -n "$EMPTY_DIRS" ]]; then + echo "Error: Package '$bin_pkg' contains empty director(y/ies):" + echo "$EMPTY_DIRS" + exit 1 + fi + + if [[ -n "$EMPTY_FILES" ]]; then + echo "Error: Package '$bin_pkg' contains empty file(s):" + echo "$EMPTY_FILES" + exit 1 + fi + echo " ✓ No empty files or directories" + + popd + echo "✓ Finished validating binary package: $bin_pkg" + done + + - name: Step 9 - Test Binary Server & Toolchain + run: | + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Step [9/9]: Test Binary Server & Toolchain" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + + cd dist/${{ env.RELEASE_VERSION }} + + # Test binary server + BIN_SERVER_DIR=$(find . -maxdepth 3 -type d -path "*hugegraph-incubating*${{ env.RELEASE_VERSION }}/*hugegraph-server-incubating*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) + + if [[ -n "$BIN_SERVER_DIR" ]]; then + echo "Testing binary server package..." + pushd "$BIN_SERVER_DIR" + + if bin/init-store.sh && sleep ${{ env.SERVER_START_DELAY }} && bin/start-hugegraph.sh; then + echo " ✓ Binary server started" + else + echo "Error: Failed to start binary server" + exit 1 + fi + + popd + fi + + # Test binary toolchain + BIN_TOOLCHAIN=$(find . -maxdepth 3 -type d -path "*toolchain*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) + + if [[ -n "$BIN_TOOLCHAIN" ]]; then + pushd "$BIN_TOOLCHAIN" + + # Test binary loader + BIN_LOADER=$(find . -maxdepth 1 -type d -name "*loader*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$BIN_LOADER" ]]; then + pushd "$BIN_LOADER" + if bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph; then + echo " ✓ Binary loader test passed" + else + echo "Error: Binary loader test failed" + exit 1 + fi + popd + fi + + # Test binary tool + BIN_TOOL=$(find . 
-maxdepth 1 -type d -name "*tool*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$BIN_TOOL" ]]; then + pushd "$BIN_TOOL" + if bin/hugegraph gremlin-execute --script 'g.V().count()' && \ + bin/hugegraph task-list && \ + bin/hugegraph backup -t all --directory ./backup-test; then + echo " ✓ Binary tool test passed" + else + echo "Error: Binary tool test failed" + exit 1 + fi + popd + fi + + # Test binary hubble + BIN_HUBBLE=$(find . -maxdepth 1 -type d -name "*hubble*${{ env.RELEASE_VERSION }}" | head -n1) + if [[ -n "$BIN_HUBBLE" ]]; then + pushd "$BIN_HUBBLE" + if bin/start-hubble.sh; then + echo " ✓ Binary hubble started" + sleep 2 + bin/stop-hubble.sh + echo " ✓ Binary hubble stopped" + else + echo "Error: Binary hubble test failed" + exit 1 + fi + popd + fi + + popd + fi + + # Stop binary server + if [[ -n "$BIN_SERVER_DIR" ]]; then + pushd "$BIN_SERVER_DIR" + bin/stop-hugegraph.sh + echo " ✓ Binary server stopped" + popd + fi + + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo " VALIDATION SUMMARY " + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + echo "✓ VALIDATION PASSED" + echo "" + echo "Please review the validation results and provide feedback in the" + echo "release voting thread on the mailing list." 
+ + strategy: + fail-fast: false + matrix: + java_version: ['11'] + os: [ubuntu-latest, macos-latest] + From c4107b6266e7420a043ce2bdb8cc7fa07b3d71a4 Mon Sep 17 00:00:00 2001 From: imbajin Date: Wed, 26 Nov 2025 14:49:11 +0800 Subject: [PATCH 08/19] Update GPG username and regex in workflow --- .github/workflows/validate-release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/validate-release.yml b/.github/workflows/validate-release.yml index 6b7cb043c..414602208 100644 --- a/.github/workflows/validate-release.yml +++ b/.github/workflows/validate-release.yml @@ -10,7 +10,7 @@ on: gpg_user: required: true description: current release manager (gpg username) - default: 'Junzhi Peng' + default: 'pengjunzhi' push: branches: @@ -244,7 +244,7 @@ jobs: - name: 7. Validate Binary Packages run: | cd dist/${{ inputs.release_version }} || exit - CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON" + CATEGORY_X="\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org" for i in *.tar.gz; do if [[ "$i" == *-src.tar.gz ]]; then # skip source packages From c773933c56aae7e116025a3d5eb1bf0d1c6e42b4 Mon Sep 17 00:00:00 2001 From: Peng Junzhi <78788603+Pengzna@users.noreply.github.com> Date: Sun, 16 Nov 2025 06:36:39 -0600 Subject: [PATCH 09/19] feat: add 1.7.0 release-notes (#429) --- .../hugegraph-1.7.0-release-notes.md | 257 ++++++++++++++++++ .../hugegraph-1.7.0-release-notes.md | 257 ++++++++++++++++++ 2 files changed, 514 insertions(+) create mode 100644 content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md create mode 100644 content/en/docs/changelog/hugegraph-1.7.0-release-notes.md diff --git a/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md 
b/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md new file mode 100644 index 000000000..1a385c335 --- /dev/null +++ b/content/cn/docs/changelog/hugegraph-1.7.0-release-notes.md @@ -0,0 +1,257 @@ +--- +title: "HugeGraph 1.7.0 Release Notes" +linkTitle: "Release-1.7.0" +weight: 7 +--- + +> WIP: This doc is under construction, please wait for the final version (BETA) + +### 运行环境/版本说明 + +**1.7.0**版 `hugegraph` 相关组件仅支持 Java 11 编译/运行环境 + +### hugegraph + +#### API Changes + +- **BREAKING CHANGE**: Disable legacy backends include MySQL/PG/c*(.etc) [#2746](https://github.com/apache/incubator-hugegraph/pull/2746) +- **BREAKING CHANGE**: Release version 1.7.0 [server + pd + store] [#2889](https://github.com/apache/incubator-hugegraph/pull/2889) + +#### Feature Changes + +- Support MemoryManagement for graph query framework [#2649](https://github.com/apache/incubator-hugegraph/pull/2649) +- LoginAPI support token_expire field [#2754](https://github.com/apache/incubator-hugegraph/pull/2754) +- Add option for task role election [#2843](https://github.com/apache/incubator-hugegraph/pull/2843) +- Optimize perf by avoid boxing long [#2861](https://github.com/apache/incubator-hugegraph/pull/2861) +- StringId hold bytes to avoid decode/encode [#2862](https://github.com/apache/incubator-hugegraph/pull/2862) +- Add PerfExample5 and PerfExample6 [#2860](https://github.com/apache/incubator-hugegraph/pull/2860) +- RocksDBStore remove redundant checkOpened() call [#2863](https://github.com/apache/incubator-hugegraph/pull/2863) +- Add path filter [#2898](https://github.com/apache/incubator-hugegraph/pull/2898) +- Init serena memory system & add memories [#2902](https://github.com/apache/incubator-hugegraph/pull/2902) + +#### Bug Fixes + +- Filter dynamice path(PUT/GET/DELETE) with params cause OOM [#2569](https://github.com/apache/incubator-hugegraph/pull/2569) +- JRaft Histogram Metrics Value NaN [#2631](https://github.com/apache/incubator-hugegraph/pull/2631) +- Update server 
image desc [#2702](https://github.com/apache/incubator-hugegraph/pull/2702) +- Kneigbor-api has unmatched edge type with server [#2699](https://github.com/apache/incubator-hugegraph/pull/2699) +- Add license for swagger-ui & reset use stage to false in ci yml [#2706](https://github.com/apache/incubator-hugegraph/pull/2706) +- Fix build pd-store arm image [#2744](https://github.com/apache/incubator-hugegraph/pull/2744) +- Fix graph server cache notifier mechanism [#2729](https://github.com/apache/incubator-hugegraph/pull/2729) +- Tx leak when stopping the graph server [#2791](https://github.com/apache/incubator-hugegraph/pull/2791) +- Ensure backend is initialized in gremlin script [#2824](https://github.com/apache/incubator-hugegraph/pull/2824) +- Fix some potential lock & type cast issues [#2895](https://github.com/apache/incubator-hugegraph/pull/2895) +- Fix npe in getVersion [#2897](https://github.com/apache/incubator-hugegraph/pull/2897) +- Fix the support for graphsapi in rocksdb and add testing for graphsapi [#2900](https://github.com/apache/incubator-hugegraph/pull/2900) +- Remove graph path in auth api path [#2899](https://github.com/apache/incubator-hugegraph/pull/2899) +- Migrate to LTS jdk11 in all Dockerfile [#2901](https://github.com/apache/incubator-hugegraph/pull/2901) +- Remove the judgment for java8 compatibility in the init-store [#2905](https://github.com/apache/incubator-hugegraph/pull/2905) +- Add missing license and remove binary license.txt & fix tinkerpop ci & remove duplicate module [#2910](https://github.com/apache/incubator-hugegraph/pull/2910) + +#### Option Changes + +- Remove some outdated configuration [#2678](https://github.com/apache/incubator-hugegraph/pull/2678) + +#### Other Changes + +- Update outdated docs for release 1.5.0 [#2690](https://github.com/apache/incubator-hugegraph/pull/2690) +- Fix licenses and remove empty files [#2692](https://github.com/apache/incubator-hugegraph/pull/2692) +- Update repo artifacts references 
[#2695](https://github.com/apache/incubator-hugegraph/pull/2695) +- Adjust release fury version [#2698](https://github.com/apache/incubator-hugegraph/pull/2698) +- Fix the JSON license issue [#2697](https://github.com/apache/incubator-hugegraph/pull/2697) +- Add debug info for tp test [#2688](https://github.com/apache/incubator-hugegraph/pull/2688) +- Enhance words in README [#2734](https://github.com/apache/incubator-hugegraph/pull/2734) +- Add collaborators in asf config [#2741](https://github.com/apache/incubator-hugegraph/pull/2741) +- Adjust the related filters of sofa-bolt [#2735](https://github.com/apache/incubator-hugegraph/pull/2735) +- Reopen discussion in .asf.yml config [#2751](https://github.com/apache/incubator-hugegraph/pull/2751) +- Fix typo in README [#2806](https://github.com/apache/incubator-hugegraph/pull/2806) +- Centralize version management in project [#2797](https://github.com/apache/incubator-hugegraph/pull/2797) +- Update notice year [#2826](https://github.com/apache/incubator-hugegraph/pull/2826) +- Improve maven Reproducible Builds → upgrade plugins [#2874](https://github.com/apache/incubator-hugegraph/pull/2874) +- Enhance docker instruction with auth opened graph [#2881](https://github.com/apache/incubator-hugegraph/pull/2881) +- Remove the package existing in java8 [#2792](https://github.com/apache/incubator-hugegraph/pull/2792) +- Revise Docker usage instructions in README [#2882](https://github.com/apache/incubator-hugegraph/pull/2882) +- Add DeepWiki badge to README [#2883](https://github.com/apache/incubator-hugegraph/pull/2883) +- Update guidance for store module [#2894](https://github.com/apache/incubator-hugegraph/pull/2894) +- Update test commands and improve documentation clarity [#2893](https://github.com/apache/incubator-hugegraph/pull/2893) +- Bump rocksdb version from 7.2.2 to 8.10.2 [#2896](https://github.com/apache/incubator-hugegraph/pull/2896) + +### hugegraph-toolchain + +#### API Changes + +- Support graphspace 
[#633](https://github.com/apache/incubator-hugegraph-toolchain/pull/633) + +#### Feature Changes + +- Support jdbc date type & sync .editorconfig [#648](https://github.com/apache/incubator-hugegraph-toolchain/pull/648) +- Add a useSSL option for mysql [#650](https://github.com/apache/incubator-hugegraph-toolchain/pull/650) +- Patch for father sub edge [#654](https://github.com/apache/incubator-hugegraph-toolchain/pull/654) +- Improve user experience for user script [#666](https://github.com/apache/incubator-hugegraph-toolchain/pull/666) +- Support concurrent readers, short-id & Graphsrc [#683](https://github.com/apache/incubator-hugegraph-toolchain/pull/683) +- Init serena onboarding & project memory files [#692](https://github.com/apache/incubator-hugegraph-toolchain/pull/692) + +#### Bug Fixes + +- Typo word in display [#655](https://github.com/apache/incubator-hugegraph-toolchain/pull/655) +- Patch up missing classes and methods for hubble [#657](https://github.com/apache/incubator-hugegraph-toolchain/pull/657) +- Adjust Client to 1.7.0 server [#689](https://github.com/apache/incubator-hugegraph-toolchain/pull/689) +- Remove json license for release 1.7.0 [#698](https://github.com/apache/incubator-hugegraph-toolchain/pull/698) + +#### Other Changes + +- Update hugegraph source commit id [#640](https://github.com/apache/incubator-hugegraph-toolchain/pull/640) +- Add collaborators in asf config [#656](https://github.com/apache/incubator-hugegraph-toolchain/pull/656) +- Update pom for version-1.7.0 [#681](https://github.com/apache/incubator-hugegraph-toolchain/pull/681) +- Add DeepWiki badge to README [#684](https://github.com/apache/incubator-hugegraph-toolchain/pull/684) +- Adjust APIs to compatible with 1.7.0 server [#685](https://github.com/apache/incubator-hugegraph-toolchain/pull/685) +- Adjust LoadContext to 1.7.0 version [#687](https://github.com/apache/incubator-hugegraph-toolchain/pull/687) +- Migrate to LTS jdk11 in all Dockerfile 
[#691](https://github.com/apache/incubator-hugegraph-toolchain/pull/691) +- Update copyright year in NOTICE file [#697](https://github.com/apache/incubator-hugegraph-toolchain/pull/697) + +### hugegraph-computer + +#### Feature Changes + +- Migration Vermeer to hugegraph-computer [#316](https://github.com/apache/incubator-hugegraph-computer/pull/316) +- Make startChan's size configurable [#328](https://github.com/apache/incubator-hugegraph-computer/pull/328) +- Assign WorkerGroup via worker configuration [#332](https://github.com/apache/incubator-hugegraph-computer/pull/332) +- Support task priority based scheduling [#336](https://github.com/apache/incubator-hugegraph-computer/pull/336) +- Avoid 800k [#340](https://github.com/apache/incubator-hugegraph-computer/pull/340) + +#### Bug Fixes + +- Fix docker file build [#341](https://github.com/apache/incubator-hugegraph-computer/pull/341) + +#### Other Changes + +- Update release version to 1.5.0 [#318](https://github.com/apache/incubator-hugegraph-computer/pull/318) +- Update go depends module & fix headers [#321](https://github.com/apache/incubator-hugegraph-computer/pull/321) +- Update go version to 1.23 [#322](https://github.com/apache/incubator-hugegraph-computer/pull/322) +- Add collaborator in .asf.yaml [#323](https://github.com/apache/incubator-hugegraph-computer/pull/323) +- Update the Go version in docker image [#333](https://github.com/apache/incubator-hugegraph-computer/pull/333) +- Add DeepWiki badge to README [#337](https://github.com/apache/incubator-hugegraph-computer/pull/337) +- Bump project version to 1.7.0 (RELEASE) [#338](https://github.com/apache/incubator-hugegraph-computer/pull/338) +- Update copyright year in NOTICE file [#342](https://github.com/apache/incubator-hugegraph-computer/pull/342) + +### hugegraph-ai + +#### API Changes + +- Support choose template in api [#135](https://github.com/apache/incubator-hugegraph-ai/pull/135) +- Add post method for paths-api 
[#162](https://github.com/apache/incubator-hugegraph-ai/pull/162) +- Support switch graph in api & add some query configs [#184](https://github.com/apache/incubator-hugegraph-ai/pull/184) +- Text2gremlin api [#258](https://github.com/apache/incubator-hugegraph-ai/pull/258) +- Support switching prompt EN/CN [#269](https://github.com/apache/incubator-hugegraph-ai/pull/269) +- **BREAKING CHANGE**: Update keyword extraction method [#282](https://github.com/apache/incubator-hugegraph-ai/pull/282) + +#### Feature Changes + +- Added the process of text2gql in graphrag V1.0 [#105](https://github.com/apache/incubator-hugegraph-ai/pull/105) +- Use pydantic-settings for config management [#122](https://github.com/apache/incubator-hugegraph-ai/pull/122) +- Timely execute vid embedding & enhance some HTTP logic [#141](https://github.com/apache/incubator-hugegraph-ai/pull/141) +- Use retry from tenacity [#143](https://github.com/apache/incubator-hugegraph-ai/pull/143) +- Modify the summary info and enhance the request logic [#147](https://github.com/apache/incubator-hugegraph-ai/pull/147) +- Automatic backup graph data timely [#151](https://github.com/apache/incubator-hugegraph-ai/pull/151) +- Add a button to backup data & count together [#153](https://github.com/apache/incubator-hugegraph-ai/pull/153) +- Extract topk_per_keyword & topk_return_results to .env [#154](https://github.com/apache/incubator-hugegraph-ai/pull/154) +- Modify clear buttons [#156](https://github.com/apache/incubator-hugegraph-ai/pull/156) +- Support intent recognition V1 [#159](https://github.com/apache/incubator-hugegraph-ai/pull/159) +- Change vid embedding x:yy to yy & use multi-thread [#158](https://github.com/apache/incubator-hugegraph-ai/pull/158) +- Support mathjax in rag query block V1 [#157](https://github.com/apache/incubator-hugegraph-ai/pull/157) +- Use poetry to manage the dependencies [#149](https://github.com/apache/incubator-hugegraph-ai/pull/149) +- Return schema.groovy first when backup 
graph data [#161](https://github.com/apache/incubator-hugegraph-ai/pull/161) +- Merge all logs into one file [#171](https://github.com/apache/incubator-hugegraph-ai/pull/171) +- Use uv for the CI action [#175](https://github.com/apache/incubator-hugegraph-ai/pull/175) +- Use EN prompt for keywords extraction [#174](https://github.com/apache/incubator-hugegraph-ai/pull/174) +- Support litellm LLM provider [#178](https://github.com/apache/incubator-hugegraph-ai/pull/178) +- Improve graph extraction default prompt [#187](https://github.com/apache/incubator-hugegraph-ai/pull/187) +- Replace vid by full vertexes info [#189](https://github.com/apache/incubator-hugegraph-ai/pull/189) +- Support asynchronous streaming generation in rag block by using async_generator and asyncio.wait [#190](https://github.com/apache/incubator-hugegraph-ai/pull/190) +- Generalize the regex extraction func [#194](https://github.com/apache/incubator-hugegraph-ai/pull/194) +- Create quick_start.md [#196](https://github.com/apache/incubator-hugegraph-ai/pull/196) +- Support Docker & K8s deployment way [#195](https://github.com/apache/incubator-hugegraph-ai/pull/195) +- Multi-stage building in Dockerfile [#199](https://github.com/apache/incubator-hugegraph-ai/pull/199) +- Support graph checking before updating vid embedding [#205](https://github.com/apache/incubator-hugegraph-ai/pull/205) +- Disable text2gql by default [#216](https://github.com/apache/incubator-hugegraph-ai/pull/216) +- Use 4.1-mini and 0.01 temperature by default [#214](https://github.com/apache/incubator-hugegraph-ai/pull/214) +- Enhance the multi configs for LLM [#212](https://github.com/apache/incubator-hugegraph-ai/pull/212) +- Textbox to Code [#223](https://github.com/apache/incubator-hugegraph-ai/pull/223) +- Replace the IP + Port with URL [#209](https://github.com/apache/incubator-hugegraph-ai/pull/209) +- Update gradio's version [#235](https://github.com/apache/incubator-hugegraph-ai/pull/235) +- Use asyncio to get 
embeddings [#215](https://github.com/apache/incubator-hugegraph-ai/pull/215) +- Change QPS -> RPM for timer decorator [#241](https://github.com/apache/incubator-hugegraph-ai/pull/241) +- Support batch embedding [#238](https://github.com/apache/incubator-hugegraph-ai/pull/238) +- Using nuitka to provide a binary/perf way for the service [#242](https://github.com/apache/incubator-hugegraph-ai/pull/242) +- Use uv instead poetry [#226](https://github.com/apache/incubator-hugegraph-ai/pull/226) +- Basic compatible in text2gremlin generation [#261](https://github.com/apache/incubator-hugegraph-ai/pull/261) +- Enhance config path handling and add project root validation [#262](https://github.com/apache/incubator-hugegraph-ai/pull/262) +- Add vermeer python client for graph computing [#263](https://github.com/apache/incubator-hugegraph-ai/pull/263) +- Use uv in client & ml modules & adapter the CI [#257](https://github.com/apache/incubator-hugegraph-ai/pull/257) +- Use uv to manage pkgs & update README [#272](https://github.com/apache/incubator-hugegraph-ai/pull/272) +- Limit the deps version to handle critical init problems [#279](https://github.com/apache/incubator-hugegraph-ai/pull/279) +- Support semi-automated prompt generation [#281](https://github.com/apache/incubator-hugegraph-ai/pull/281) +- Support semi-automated generated graph schema [#274](https://github.com/apache/incubator-hugegraph-ai/pull/274) +- Unify all modules with uv [#287](https://github.com/apache/incubator-hugegraph-ai/pull/287) +- Add GitHub Actions for auto upstream sync and update SEALData subsample logic [#289](https://github.com/apache/incubator-hugegraph-ai/pull/289) +- Add a basic LLM/AI coding instruction file [#290](https://github.com/apache/incubator-hugegraph-ai/pull/290) +- Add rules for AI coding guideline - V1.0 [#293](https://github.com/apache/incubator-hugegraph-ai/pull/293) +- Replace QianFan by OpenAI-compatible format 
[#285](https://github.com/apache/incubator-hugegraph-ai/pull/285) +- Optimize vector index with asyncio embedding [#264](https://github.com/apache/incubator-hugegraph-ai/pull/264) +- Refactor embedding parallelization to preserve order [#295](https://github.com/apache/incubator-hugegraph-ai/pull/295) +- Support storing vector data for a graph instance by model type/name [#265](https://github.com/apache/incubator-hugegraph-ai/pull/265) +- Add AGENTS.md as new document standard [#299](https://github.com/apache/incubator-hugegraph-ai/pull/299) +- Add Fixed Workflow Execution Engine: Flow, Node, and Scheduler Architecture [#302](https://github.com/apache/incubator-hugegraph-ai/pull/302) +- Support vector db layer V1.0 [#304](https://github.com/apache/incubator-hugegraph-ai/pull/304) + +#### Bug Fixes + +- Limit the length of log & improve the format [#121](https://github.com/apache/incubator-hugegraph-ai/pull/121) +- Pylint in ml [#125](https://github.com/apache/incubator-hugegraph-ai/pull/125) +- Critical bug with pylint usage [#131](https://github.com/apache/incubator-hugegraph-ai/pull/131) +- Multi vid k-neighbor query only return the data of first vid [#132](https://github.com/apache/incubator-hugegraph-ai/pull/132) +- Replace getenv usage to settings [#133](https://github.com/apache/incubator-hugegraph-ai/pull/133) +- Correct header writing errors [#140](https://github.com/apache/incubator-hugegraph-ai/pull/140) +- Update prompt to fit prefix cache [#137](https://github.com/apache/incubator-hugegraph-ai/pull/137) +- Extract_graph_data use wrong method [#145](https://github.com/apache/incubator-hugegraph-ai/pull/145) +- Use empty str for llm config [#155](https://github.com/apache/incubator-hugegraph-ai/pull/155) +- Update gremlin generate prompt to apply fuzzy match [#163](https://github.com/apache/incubator-hugegraph-ai/pull/163) +- Enable fastapi auto reload function [#164](https://github.com/apache/incubator-hugegraph-ai/pull/164) +- Fix tiny bugs & optimize 
reranker layout [#202](https://github.com/apache/incubator-hugegraph-ai/pull/202) +- Enable tasks concurrency configs in Gradio [#188](https://github.com/apache/incubator-hugegraph-ai/pull/188) +- Align regex extraction of json to json format of prompt [#211](https://github.com/apache/incubator-hugegraph-ai/pull/211) +- Fix documentation sample code error [#219](https://github.com/apache/incubator-hugegraph-ai/pull/219) +- Failed to remove vectors when updating vid embedding [#243](https://github.com/apache/incubator-hugegraph-ai/pull/243) +- Skip empty chunk in LLM steaming mode [#245](https://github.com/apache/incubator-hugegraph-ai/pull/245) +- Ollama batch embedding bug [#250](https://github.com/apache/incubator-hugegraph-ai/pull/250) +- Fix Dockerfile to add pyproject.toml anchor file [#266](https://github.com/apache/incubator-hugegraph-ai/pull/266) +- Add missing 'properties' in gremlin prompt formatting [#298](https://github.com/apache/incubator-hugegraph-ai/pull/298) +- Fixed cgraph version [#305](https://github.com/apache/incubator-hugegraph-ai/pull/305) +- Ollama embedding API usage and config param [#306](https://github.com/apache/incubator-hugegraph-ai/pull/306) + +#### Option Changes + +- Remove enable_gql logic in api & rag block [#148](https://github.com/apache/incubator-hugegraph-ai/pull/148) + +#### Other Changes + +- Update README for python-client/SDK [#150](https://github.com/apache/incubator-hugegraph-ai/pull/150) +- Enable pip cache [#142](https://github.com/apache/incubator-hugegraph-ai/pull/142) +- Enable discussion & change merge way [#201](https://github.com/apache/incubator-hugegraph-ai/pull/201) +- Synchronization with official documentation [#273](https://github.com/apache/incubator-hugegraph-ai/pull/273) +- Fix grammar errors [#275](https://github.com/apache/incubator-hugegraph-ai/pull/275) +- Improve README clarity and deployment instructions [#276](https://github.com/apache/incubator-hugegraph-ai/pull/276) +- Add docker-compose 
deployment and improve container networking instructions [#280](https://github.com/apache/incubator-hugegraph-ai/pull/280) +- Update docker compose command [#283](https://github.com/apache/incubator-hugegraph-ai/pull/283) +- Reduce third-party library log output [#284](https://github.com/apache/incubator-hugegraph-ai/pull/284) +- Update README with improved setup instructions [#294](https://github.com/apache/incubator-hugegraph-ai/pull/294) +- Add collaborators in asf config [#182](https://github.com/apache/incubator-hugegraph-ai/pull/182) + +### 发布细节 + +Please check the release details/contributor in each repository: + +- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases) +- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases) +- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases) +- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases) diff --git a/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md b/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md new file mode 100644 index 000000000..874730079 --- /dev/null +++ b/content/en/docs/changelog/hugegraph-1.7.0-release-notes.md @@ -0,0 +1,257 @@ +--- +title: "HugeGraph 1.7.0 Release Notes" +linkTitle: "Release-1.7.0" +weight: 7 +--- + +> WIP: This doc is under construction, please wait for the final version (BETA) + +### Operating Environment / Version Description + +For **1.7.0** version `hugegraph`, related components only support Java11. 
+ +### hugegraph + +#### API Changes + +- **BREAKING CHANGE**: Disable legacy backends include MySQL/PG/c*(.etc) [#2746](https://github.com/apache/incubator-hugegraph/pull/2746) +- **BREAKING CHANGE**: Release version 1.7.0 [server + pd + store] [#2889](https://github.com/apache/incubator-hugegraph/pull/2889) + +#### Feature Changes + +- Support MemoryManagement for graph query framework [#2649](https://github.com/apache/incubator-hugegraph/pull/2649) +- LoginAPI support token_expire field [#2754](https://github.com/apache/incubator-hugegraph/pull/2754) +- Add option for task role election [#2843](https://github.com/apache/incubator-hugegraph/pull/2843) +- Optimize perf by avoid boxing long [#2861](https://github.com/apache/incubator-hugegraph/pull/2861) +- StringId hold bytes to avoid decode/encode [#2862](https://github.com/apache/incubator-hugegraph/pull/2862) +- Add PerfExample5 and PerfExample6 [#2860](https://github.com/apache/incubator-hugegraph/pull/2860) +- RocksDBStore remove redundant checkOpened() call [#2863](https://github.com/apache/incubator-hugegraph/pull/2863) +- Add path filter [#2898](https://github.com/apache/incubator-hugegraph/pull/2898) +- Init serena memory system & add memories [#2902](https://github.com/apache/incubator-hugegraph/pull/2902) + +#### Bug Fixes + +- Filter dynamice path(PUT/GET/DELETE) with params cause OOM [#2569](https://github.com/apache/incubator-hugegraph/pull/2569) +- JRaft Histogram Metrics Value NaN [#2631](https://github.com/apache/incubator-hugegraph/pull/2631) +- Update server image desc [#2702](https://github.com/apache/incubator-hugegraph/pull/2702) +- Kneigbor-api has unmatched edge type with server [#2699](https://github.com/apache/incubator-hugegraph/pull/2699) +- Add license for swagger-ui & reset use stage to false in ci yml [#2706](https://github.com/apache/incubator-hugegraph/pull/2706) +- Fix build pd-store arm image [#2744](https://github.com/apache/incubator-hugegraph/pull/2744) +- Fix graph server 
cache notifier mechanism [#2729](https://github.com/apache/incubator-hugegraph/pull/2729) +- Tx leak when stopping the graph server [#2791](https://github.com/apache/incubator-hugegraph/pull/2791) +- Ensure backend is initialized in gremlin script [#2824](https://github.com/apache/incubator-hugegraph/pull/2824) +- Fix some potential lock & type cast issues [#2895](https://github.com/apache/incubator-hugegraph/pull/2895) +- Fix npe in getVersion [#2897](https://github.com/apache/incubator-hugegraph/pull/2897) +- Fix the support for graphsapi in rocksdb and add testing for graphsapi [#2900](https://github.com/apache/incubator-hugegraph/pull/2900) +- Remove graph path in auth api path [#2899](https://github.com/apache/incubator-hugegraph/pull/2899) +- Migrate to LTS jdk11 in all Dockerfile [#2901](https://github.com/apache/incubator-hugegraph/pull/2901) +- Remove the judgment for java8 compatibility in the init-store [#2905](https://github.com/apache/incubator-hugegraph/pull/2905) +- Add missing license and remove binary license.txt & fix tinkerpop ci & remove duplicate module [#2910](https://github.com/apache/incubator-hugegraph/pull/2910) + +#### Option Changes + +- Remove some outdated configuration [#2678](https://github.com/apache/incubator-hugegraph/pull/2678) + +#### Other Changes + +- Update outdated docs for release 1.5.0 [#2690](https://github.com/apache/incubator-hugegraph/pull/2690) +- Fix licenses and remove empty files [#2692](https://github.com/apache/incubator-hugegraph/pull/2692) +- Update repo artifacts references [#2695](https://github.com/apache/incubator-hugegraph/pull/2695) +- Adjust release fury version [#2698](https://github.com/apache/incubator-hugegraph/pull/2698) +- Fix the JSON license issue [#2697](https://github.com/apache/incubator-hugegraph/pull/2697) +- Add debug info for tp test [#2688](https://github.com/apache/incubator-hugegraph/pull/2688) +- Enhance words in README [#2734](https://github.com/apache/incubator-hugegraph/pull/2734) 
+- Add collaborators in asf config [#2741](https://github.com/apache/incubator-hugegraph/pull/2741) +- Adjust the related filters of sofa-bolt [#2735](https://github.com/apache/incubator-hugegraph/pull/2735) +- Reopen discussion in .asf.yml config [#2751](https://github.com/apache/incubator-hugegraph/pull/2751) +- Fix typo in README [#2806](https://github.com/apache/incubator-hugegraph/pull/2806) +- Centralize version management in project [#2797](https://github.com/apache/incubator-hugegraph/pull/2797) +- Update notice year [#2826](https://github.com/apache/incubator-hugegraph/pull/2826) +- Improve maven Reproducible Builds → upgrade plugins [#2874](https://github.com/apache/incubator-hugegraph/pull/2874) +- Enhance docker instruction with auth opened graph [#2881](https://github.com/apache/incubator-hugegraph/pull/2881) +- Remove the package existing in java8 [#2792](https://github.com/apache/incubator-hugegraph/pull/2792) +- Revise Docker usage instructions in README [#2882](https://github.com/apache/incubator-hugegraph/pull/2882) +- Add DeepWiki badge to README [#2883](https://github.com/apache/incubator-hugegraph/pull/2883) +- Update guidance for store module [#2894](https://github.com/apache/incubator-hugegraph/pull/2894) +- Update test commands and improve documentation clarity [#2893](https://github.com/apache/incubator-hugegraph/pull/2893) +- Bump rocksdb version from 7.2.2 to 8.10.2 [#2896](https://github.com/apache/incubator-hugegraph/pull/2896) + +### hugegraph-toolchain + +#### API Changes + +- Support graphspace [#633](https://github.com/apache/incubator-hugegraph-toolchain/pull/633) + +#### Feature Changes + +- Support jdbc date type & sync .editorconfig [#648](https://github.com/apache/incubator-hugegraph-toolchain/pull/648) +- Add a useSSL option for mysql [#650](https://github.com/apache/incubator-hugegraph-toolchain/pull/650) +- Patch for father sub edge [#654](https://github.com/apache/incubator-hugegraph-toolchain/pull/654) +- Improve user 
experience for user script [#666](https://github.com/apache/incubator-hugegraph-toolchain/pull/666) +- Support concurrent readers, short-id & Graphsrc [#683](https://github.com/apache/incubator-hugegraph-toolchain/pull/683) +- Init serena onboarding & project memory files [#692](https://github.com/apache/incubator-hugegraph-toolchain/pull/692) + +#### Bug Fixes + +- Typo word in display [#655](https://github.com/apache/incubator-hugegraph-toolchain/pull/655) +- Patch up missing classes and methods for hubble [#657](https://github.com/apache/incubator-hugegraph-toolchain/pull/657) +- Adjust Client to 1.7.0 server [#689](https://github.com/apache/incubator-hugegraph-toolchain/pull/689) +- Remove json license for release 1.7.0 [#698](https://github.com/apache/incubator-hugegraph-toolchain/pull/698) + +#### Other Changes + +- Update hugegraph source commit id [#640](https://github.com/apache/incubator-hugegraph-toolchain/pull/640) +- Add collaborators in asf config [#656](https://github.com/apache/incubator-hugegraph-toolchain/pull/656) +- Update pom for version-1.7.0 [#681](https://github.com/apache/incubator-hugegraph-toolchain/pull/681) +- Add DeepWiki badge to README [#684](https://github.com/apache/incubator-hugegraph-toolchain/pull/684) +- Adjust APIs to compatible with 1.7.0 server [#685](https://github.com/apache/incubator-hugegraph-toolchain/pull/685) +- Adjust LoadContext to 1.7.0 version [#687](https://github.com/apache/incubator-hugegraph-toolchain/pull/687) +- Migrate to LTS jdk11 in all Dockerfile [#691](https://github.com/apache/incubator-hugegraph-toolchain/pull/691) +- Update copyright year in NOTICE file [#697](https://github.com/apache/incubator-hugegraph-toolchain/pull/697) + +### hugegraph-computer + +#### Feature Changes + +- Migration Vermeer to hugegraph-computer [#316](https://github.com/apache/incubator-hugegraph-computer/pull/316) +- Make startChan's size configurable [#328](https://github.com/apache/incubator-hugegraph-computer/pull/328) +- 
Assign WorkerGroup via worker configuration [#332](https://github.com/apache/incubator-hugegraph-computer/pull/332) +- Support task priority based scheduling [#336](https://github.com/apache/incubator-hugegraph-computer/pull/336) +- Avoid 800k [#340](https://github.com/apache/incubator-hugegraph-computer/pull/340) + +#### Bug Fixes + +- Fix docker file build [#341](https://github.com/apache/incubator-hugegraph-computer/pull/341) + +#### Other Changes + +- Update release version to 1.5.0 [#318](https://github.com/apache/incubator-hugegraph-computer/pull/318) +- Update go depends module & fix headers [#321](https://github.com/apache/incubator-hugegraph-computer/pull/321) +- Update go version to 1.23 [#322](https://github.com/apache/incubator-hugegraph-computer/pull/322) +- Add collaborator in .asf.yaml [#323](https://github.com/apache/incubator-hugegraph-computer/pull/323) +- Update the Go version in docker image [#333](https://github.com/apache/incubator-hugegraph-computer/pull/333) +- Add DeepWiki badge to README [#337](https://github.com/apache/incubator-hugegraph-computer/pull/337) +- Bump project version to 1.7.0 (RELEASE) [#338](https://github.com/apache/incubator-hugegraph-computer/pull/338) +- Update copyright year in NOTICE file [#342](https://github.com/apache/incubator-hugegraph-computer/pull/342) + +### hugegraph-ai + +#### API Changes + +- Support choose template in api [#135](https://github.com/apache/incubator-hugegraph-ai/pull/135) +- Add post method for paths-api [#162](https://github.com/apache/incubator-hugegraph-ai/pull/162) +- Support switch graph in api & add some query configs [#184](https://github.com/apache/incubator-hugegraph-ai/pull/184) +- Text2gremlin api [#258](https://github.com/apache/incubator-hugegraph-ai/pull/258) +- Support switching prompt EN/CN [#269](https://github.com/apache/incubator-hugegraph-ai/pull/269) +- **BREAKING CHANGE**: Update keyword extraction method [#282](https://github.com/apache/incubator-hugegraph-ai/pull/282) 
+ +#### Feature Changes + +- Added the process of text2gql in graphrag V1.0 [#105](https://github.com/apache/incubator-hugegraph-ai/pull/105) +- Use pydantic-settings for config management [#122](https://github.com/apache/incubator-hugegraph-ai/pull/122) +- Timely execute vid embedding & enhance some HTTP logic [#141](https://github.com/apache/incubator-hugegraph-ai/pull/141) +- Use retry from tenacity [#143](https://github.com/apache/incubator-hugegraph-ai/pull/143) +- Modify the summary info and enhance the request logic [#147](https://github.com/apache/incubator-hugegraph-ai/pull/147) +- Automatic backup graph data timely [#151](https://github.com/apache/incubator-hugegraph-ai/pull/151) +- Add a button to backup data & count together [#153](https://github.com/apache/incubator-hugegraph-ai/pull/153) +- Extract topk_per_keyword & topk_return_results to .env [#154](https://github.com/apache/incubator-hugegraph-ai/pull/154) +- Modify clear buttons [#156](https://github.com/apache/incubator-hugegraph-ai/pull/156) +- Support intent recognition V1 [#159](https://github.com/apache/incubator-hugegraph-ai/pull/159) +- Change vid embedding x:yy to yy & use multi-thread [#158](https://github.com/apache/incubator-hugegraph-ai/pull/158) +- Support mathjax in rag query block V1 [#157](https://github.com/apache/incubator-hugegraph-ai/pull/157) +- Use poetry to manage the dependencies [#149](https://github.com/apache/incubator-hugegraph-ai/pull/149) +- Return schema.groovy first when backup graph data [#161](https://github.com/apache/incubator-hugegraph-ai/pull/161) +- Merge all logs into one file [#171](https://github.com/apache/incubator-hugegraph-ai/pull/171) +- Use uv for the CI action [#175](https://github.com/apache/incubator-hugegraph-ai/pull/175) +- Use EN prompt for keywords extraction [#174](https://github.com/apache/incubator-hugegraph-ai/pull/174) +- Support litellm LLM provider [#178](https://github.com/apache/incubator-hugegraph-ai/pull/178) +- Improve graph 
extraction default prompt [#187](https://github.com/apache/incubator-hugegraph-ai/pull/187) +- Replace vid by full vertexes info [#189](https://github.com/apache/incubator-hugegraph-ai/pull/189) +- Support asynchronous streaming generation in rag block by using async_generator and asyncio.wait [#190](https://github.com/apache/incubator-hugegraph-ai/pull/190) +- Generalize the regex extraction func [#194](https://github.com/apache/incubator-hugegraph-ai/pull/194) +- Create quick_start.md [#196](https://github.com/apache/incubator-hugegraph-ai/pull/196) +- Support Docker & K8s deployment way [#195](https://github.com/apache/incubator-hugegraph-ai/pull/195) +- Multi-stage building in Dockerfile [#199](https://github.com/apache/incubator-hugegraph-ai/pull/199) +- Support graph checking before updating vid embedding [#205](https://github.com/apache/incubator-hugegraph-ai/pull/205) +- Disable text2gql by default [#216](https://github.com/apache/incubator-hugegraph-ai/pull/216) +- Use 4.1-mini and 0.01 temperature by default [#214](https://github.com/apache/incubator-hugegraph-ai/pull/214) +- Enhance the multi configs for LLM [#212](https://github.com/apache/incubator-hugegraph-ai/pull/212) +- Textbox to Code [#223](https://github.com/apache/incubator-hugegraph-ai/pull/223) +- Replace the IP + Port with URL [#209](https://github.com/apache/incubator-hugegraph-ai/pull/209) +- Update gradio's version [#235](https://github.com/apache/incubator-hugegraph-ai/pull/235) +- Use asyncio to get embeddings [#215](https://github.com/apache/incubator-hugegraph-ai/pull/215) +- Change QPS -> RPM for timer decorator [#241](https://github.com/apache/incubator-hugegraph-ai/pull/241) +- Support batch embedding [#238](https://github.com/apache/incubator-hugegraph-ai/pull/238) +- Using nuitka to provide a binary/perf way for the service [#242](https://github.com/apache/incubator-hugegraph-ai/pull/242) +- Use uv instead poetry [#226](https://github.com/apache/incubator-hugegraph-ai/pull/226) 
+- Basic compatible in text2gremlin generation [#261](https://github.com/apache/incubator-hugegraph-ai/pull/261) +- Enhance config path handling and add project root validation [#262](https://github.com/apache/incubator-hugegraph-ai/pull/262) +- Add vermeer python client for graph computing [#263](https://github.com/apache/incubator-hugegraph-ai/pull/263) +- Use uv in client & ml modules & adapter the CI [#257](https://github.com/apache/incubator-hugegraph-ai/pull/257) +- Use uv to manage pkgs & update README [#272](https://github.com/apache/incubator-hugegraph-ai/pull/272) +- Limit the deps version to handle critical init problems [#279](https://github.com/apache/incubator-hugegraph-ai/pull/279) +- Support semi-automated prompt generation [#281](https://github.com/apache/incubator-hugegraph-ai/pull/281) +- Support semi-automated generated graph schema [#274](https://github.com/apache/incubator-hugegraph-ai/pull/274) +- Unify all modules with uv [#287](https://github.com/apache/incubator-hugegraph-ai/pull/287) +- Add GitHub Actions for auto upstream sync and update SEALData subsample logic [#289](https://github.com/apache/incubator-hugegraph-ai/pull/289) +- Add a basic LLM/AI coding instruction file [#290](https://github.com/apache/incubator-hugegraph-ai/pull/290) +- Add rules for AI coding guideline - V1.0 [#293](https://github.com/apache/incubator-hugegraph-ai/pull/293) +- Replace QianFan by OpenAI-compatible format [#285](https://github.com/apache/incubator-hugegraph-ai/pull/285) +- Optimize vector index with asyncio embedding [#264](https://github.com/apache/incubator-hugegraph-ai/pull/264) +- Refactor embedding parallelization to preserve order [#295](https://github.com/apache/incubator-hugegraph-ai/pull/295) +- Support storing vector data for a graph instance by model type/name [#265](https://github.com/apache/incubator-hugegraph-ai/pull/265) +- Add AGENTS.md as new document standard [#299](https://github.com/apache/incubator-hugegraph-ai/pull/299) +- Add 
Fixed Workflow Execution Engine: Flow, Node, and Scheduler Architecture [#302](https://github.com/apache/incubator-hugegraph-ai/pull/302) +- Support vector db layer V1.0 [#304](https://github.com/apache/incubator-hugegraph-ai/pull/304) + +#### Bug Fixes + +- Limit the length of log & improve the format [#121](https://github.com/apache/incubator-hugegraph-ai/pull/121) +- Pylint in ml [#125](https://github.com/apache/incubator-hugegraph-ai/pull/125) +- Critical bug with pylint usage [#131](https://github.com/apache/incubator-hugegraph-ai/pull/131) +- Multi vid k-neighbor query only return the data of first vid [#132](https://github.com/apache/incubator-hugegraph-ai/pull/132) +- Replace getenv usage to settings [#133](https://github.com/apache/incubator-hugegraph-ai/pull/133) +- Correct header writing errors [#140](https://github.com/apache/incubator-hugegraph-ai/pull/140) +- Update prompt to fit prefix cache [#137](https://github.com/apache/incubator-hugegraph-ai/pull/137) +- Extract_graph_data use wrong method [#145](https://github.com/apache/incubator-hugegraph-ai/pull/145) +- Use empty str for llm config [#155](https://github.com/apache/incubator-hugegraph-ai/pull/155) +- Update gremlin generate prompt to apply fuzzy match [#163](https://github.com/apache/incubator-hugegraph-ai/pull/163) +- Enable fastapi auto reload function [#164](https://github.com/apache/incubator-hugegraph-ai/pull/164) +- Fix tiny bugs & optimize reranker layout [#202](https://github.com/apache/incubator-hugegraph-ai/pull/202) +- Enable tasks concurrency configs in Gradio [#188](https://github.com/apache/incubator-hugegraph-ai/pull/188) +- Align regex extraction of json to json format of prompt [#211](https://github.com/apache/incubator-hugegraph-ai/pull/211) +- Fix documentation sample code error [#219](https://github.com/apache/incubator-hugegraph-ai/pull/219) +- Failed to remove vectors when updating vid embedding [#243](https://github.com/apache/incubator-hugegraph-ai/pull/243) +- Skip 
empty chunk in LLM streaming mode [#245](https://github.com/apache/incubator-hugegraph-ai/pull/245) +- Ollama batch embedding bug [#250](https://github.com/apache/incubator-hugegraph-ai/pull/250) +- Fix Dockerfile to add pyproject.toml anchor file [#266](https://github.com/apache/incubator-hugegraph-ai/pull/266) +- Add missing 'properties' in gremlin prompt formatting [#298](https://github.com/apache/incubator-hugegraph-ai/pull/298) +- Fixed cgraph version [#305](https://github.com/apache/incubator-hugegraph-ai/pull/305) +- Ollama embedding API usage and config param [#306](https://github.com/apache/incubator-hugegraph-ai/pull/306) + +#### Option Changes + +- Remove enable_gql logic in api & rag block [#148](https://github.com/apache/incubator-hugegraph-ai/pull/148) + +#### Other Changes + +- Update README for python-client/SDK [#150](https://github.com/apache/incubator-hugegraph-ai/pull/150) +- Enable pip cache [#142](https://github.com/apache/incubator-hugegraph-ai/pull/142) +- Enable discussion & change merge way [#201](https://github.com/apache/incubator-hugegraph-ai/pull/201) +- Synchronization with official documentation [#273](https://github.com/apache/incubator-hugegraph-ai/pull/273) +- Fix grammar errors [#275](https://github.com/apache/incubator-hugegraph-ai/pull/275) +- Improve README clarity and deployment instructions [#276](https://github.com/apache/incubator-hugegraph-ai/pull/276) +- Add docker-compose deployment and improve container networking instructions [#280](https://github.com/apache/incubator-hugegraph-ai/pull/280) +- Update docker compose command [#283](https://github.com/apache/incubator-hugegraph-ai/pull/283) +- Reduce third-party library log output [#284](https://github.com/apache/incubator-hugegraph-ai/pull/284) +- Update README with improved setup instructions [#294](https://github.com/apache/incubator-hugegraph-ai/pull/294) +- Add collaborators in asf config [#182](https://github.com/apache/incubator-hugegraph-ai/pull/182) + +### 
Release Details + +Please check the release details/contributor in each repository: + +- [Server Release Notes](https://github.com/apache/incubator-hugegraph/releases) +- [Toolchain Release Notes](https://github.com/apache/incubator-hugegraph-toolchain/releases) +- [Computer Release Notes](https://github.com/apache/incubator-hugegraph-computer/releases) +- [AI Release Notes](https://github.com/apache/incubator-hugegraph-ai/releases) From dff6ba1049a6f2081ae626dcbe87070b530fa37f Mon Sep 17 00:00:00 2001 From: Jingkai Yang Date: Mon, 17 Nov 2025 19:32:28 +0800 Subject: [PATCH 10/19] chore: add docker using / add load data methods (#423) --- .../quickstart/computing/hugegraph-vermeer.md | 219 ++++++++++++++++-- .../quickstart/computing/hugegraph-vermeer.md | 219 ++++++++++++++++-- 2 files changed, 411 insertions(+), 27 deletions(-) diff --git a/content/cn/docs/quickstart/computing/hugegraph-vermeer.md b/content/cn/docs/quickstart/computing/hugegraph-vermeer.md index e96b5046f..e739e110e 100644 --- a/content/cn/docs/quickstart/computing/hugegraph-vermeer.md +++ b/content/cn/docs/quickstart/computing/hugegraph-vermeer.md @@ -16,6 +16,142 @@ master 是负责通信、转发、汇总的节点,计算量和占用资源量 ### 1.2 运行方法 +1. **方案一:Docker Compose(推荐)** + +确保docker-compose.yaml存在于您的项目根目录中。如果没有,以下是一个示例: +```yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +services: + vermeer-master: + image: hugegraph/vermeer + container_name: vermeer-master + volumes: + - ~/.config:/go/bin/config # Change here to your actual config path + command: --env=master + networks: + vermeer_network: + ipv4_address: 172.20.0.10 # Assign a static IP for the master + + vermeer-worker: + image: hugegraph/vermeer + container_name: vermeer-worker + volumes: + - ~/:/go/bin/config # Change here to your actual config path + command: --env=worker + networks: + vermeer_network: + ipv4_address: 172.20.0.11 # Assign a static IP for the worker + +networks: + vermeer_network: + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/24 # Define the subnet for your network +``` + +修改 docker-compose.yaml +- **Volume**:例如将两处 ~/:/go/bin/config 改为 /home/user/config:/go/bin/config(或您自己的配置目录)。 +- **Subnet**:根据实际情况修改子网IP。请注意,每个容器需要访问的端口在config文件中指定,具体请参照项目`config`文件夹下内容。 + +在项目目录构建镜像并启动(或者先用 docker build 再 docker-compose up) + +```shell +# 构建镜像(在项目根 vermeer 目录) +docker build -t hugegraph/vermeer . + +# 启动(在 vermeer 根目录) +docker-compose up -d +# 或使用新版 CLI: +# docker compose up -d +``` + +查看日志 / 停止 / 删除: + +```shell +docker-compose logs -f +docker-compose down +``` + +2. **方案二:通过 docker run 单独启动(手动创建网络并分配静态 IP)** + +确保CONFIG_DIR对Docker进程具有适当的读取/执行权限。 + +构建镜像: + +```shell +docker build -t hugegraph/vermeer . 
+``` + +创建自定义 bridge 网络(一次性操作): + +```shell +docker network create --driver bridge \ + --subnet 172.20.0.0/24 \ + vermeer_network +``` + +运行 master(调整 CONFIG_DIR 为您的绝对配置路径,可以根据实际情况调整IP): + +```shell +CONFIG_DIR=/home/user/config + +docker run -d \ + --name vermeer-master \ + --network vermeer_network --ip 172.20.0.10 \ + -v ${CONFIG_DIR}:/go/bin/config \ + hugegraph/vermeer \ + --env=master +``` + +运行 worker: + +```shell +docker run -d \ + --name vermeer-worker \ + --network vermeer_network --ip 172.20.0.11 \ + -v ${CONFIG_DIR}:/go/bin/config \ + hugegraph/vermeer \ + --env=worker +``` + +查看日志 / 停止 / 删除: + +```shell +docker logs -f vermeer-master +docker logs -f vermeer-worker + +docker stop vermeer-master vermeer-worker +docker rm vermeer-master vermeer-worker + +# 删除自定义网络(如果需要) +docker network rm vermeer_network +``` + +3. **方案三:从源码构建** + +构建。具体请参照[Vermeer Readme](https://github.com/apache/incubator-hugegraph-computer/tree/master/vermeer)。 + +```shell +go build +``` + 在进入文件夹目录后输入 `./vermeer --env=master` 或 `./vermeer --env=worker01` ## 二、任务创建类 rest api @@ -33,7 +169,11 @@ master 是负责通信、转发、汇总的节点,计算量和占用资源量 具体参数参考 Vermeer 参数列表文档。 -request 示例: +vermeer提供三种加载方式: + +1. 从本地加载 + +**request 示例:** ```javascript POST http://localhost:8688/tasks/create @@ -41,16 +181,67 @@ POST http://localhost:8688/tasks/create "task_type": "load", "graph": "testdb", "params": { - "load.parallel": "50", - "load.type": "local", - "load.vertex_files": "{\"localhost\":\"data/twitter-2010.v_[0,99]\"}", - "load.edge_files": "{\"localhost\":\"data/twitter-2010.e_[0,99]\"}", - "load.use_out_degree": "1", - "load.use_outedge": "1" + "load.parallel": "50", + "load.type": "local", + "load.vertex_files": "{\"localhost\":\"data/twitter-2010.v_[0,99]\"}", + "load.edge_files": "{\"localhost\":\"data/twitter-2010.e_[0,99]\"}", + "load.use_out_degree": "1", + "load.use_outedge": "1" } } ``` +2. 
从hugegraph加载 + +**request 示例:** + +⚠️ 安全警告:切勿在配置文件或代码中存储真实密码。请改用环境变量或安全的凭据管理系统。 + +```javascript +POST http://localhost:8688/tasks/create +{ + "task_type": "load", + "graph": "testdb", + "params": { + "load.parallel": "50", + "load.type": "hugegraph", + "load.hg_pd_peers": "[\":8686\"]", + "load.hugegraph_name": "DEFAULT/hugegraph2/g", + "load.hugegraph_username": "admin", + "load.hugegraph_password": "", + "load.use_out_degree": "1", + "load.use_outedge": "1" + } +} +``` + +3. 从hdfs加载 + +**request 示例:** + +```javascript +POST http://localhost:8688/tasks/create +{ + "task_type": "load", + "graph": "testdb", + "params": { + "load.parallel": "50", + "load.type": "hdfs", + "load.hdfs_namenode": "name_node1:9000", + "load.hdfs_conf_path": "/path/to/conf", + "load.krb_realm": "EXAMPLE.COM", + "load.krb_name": "user@EXAMPLE.COM", + "load.krb_keytab_path": "/path/to/keytab", + "load.krb_conf_path": "/path/to/krb5.conf", + "load.hdfs_use_krb": "1", + "load.vertex_files": "/data/graph/vertices", + "load.edge_files": "/data/graph/edges", + "load.use_out_degree": "1", + "load.use_outedge": "1" + } +} +``` + ### 2.3 输出计算结果 所有的 vermeer 计算任务均支持多种结果输出方式,可自定义输出方式:local、hdfs、afs 或 hugegraph,在发送请求时的 params 参数下加入对应参数,即可生效。指定 output.need_statistics 为 1 时,支持计算结果统计信息输出,结果会写在接口任务信息内。统计模式算子目前支持 "count" 和 "modularity" 。但仅针对社区发现算法适用。 @@ -66,13 +257,13 @@ POST http://localhost:8688/tasks/create "graph": "testdb", "params": { "compute.algorithm": "pagerank", - "compute.parallel":"10", - "compute.max_step":"10", - "output.type":"local", - "output.parallel":"1", - "output.file_path":"result/pagerank" - } - } + "compute.parallel": "10", + "compute.max_step": "10", + "output.type": "local", + "output.parallel": "1", + "output.file_path": "result/pagerank" + } +} ``` ## 三、支持的算法 diff --git a/content/en/docs/quickstart/computing/hugegraph-vermeer.md b/content/en/docs/quickstart/computing/hugegraph-vermeer.md index 7ea79038f..b0a96027f 100644 --- 
a/content/en/docs/quickstart/computing/hugegraph-vermeer.md +++ b/content/en/docs/quickstart/computing/hugegraph-vermeer.md @@ -16,6 +16,144 @@ The framework's runtime configuration can be passed via command-line parameters ### 1.2 Running Method +1. **Option 1: Docker Compose (Recommended)** + +Please ensure that `docker-compose.yaml` exists in your project root directory. If it doesn't, here is an example: + +```yaml +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements.  See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License.  You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +services: +  vermeer-master: +    image: hugegraph/vermeer +    container_name: vermeer-master +    volumes: +      - ~/.config:/go/bin/config # Change here to your actual config path +    command: --env=master +    networks: +      vermeer_network: +        ipv4_address: 172.20.0.10 # Assign a static IP for the master + +  vermeer-worker: +    image: hugegraph/vermeer +    container_name: vermeer-worker +    volumes: +      - ~/:/go/bin/config # Change here to your actual config path +    command: --env=worker +    networks: +      vermeer_network: +        ipv4_address: 172.20.0.11 # Assign a static IP for the worker + +networks: +  vermeer_network: +    driver: bridge +    ipam: +      config: +        - subnet: 172.20.0.0/24 # Define the subnet for your network +``` + +Modify `docker-compose.yaml` + +- **Volume**: For example, change both instances of `~/:/go/bin/config` to `/home/user/config:/go/bin/config` (or your own configuration directory). +- **Subnet**: Modify the subnet IP based on your actual situation. Note that the ports each container needs to access are specified in the config file. Please refer to the contents of the project's `config` folder for details. + +Build the Image and Start in the Project Directory (or `docker build` first, then `docker-compose up`) + +```shell +# Build the image (in the project root vermeer directory) +docker build -t hugegraph/vermeer . + +# Start the services (in the vermeer root directory) +docker-compose up -d +# Or use the new CLI: +# docker compose up -d +``` + +View Logs / Stop / Remove + +```shell +docker-compose logs -f +docker-compose down +``` + +2. **Option 2: Start individually via `docker run` (Manually create network and assign static IP)** + +Ensure the CONFIG_DIR has proper read/execute permissions for the Docker process. + +Build the image: + +```shell +docker build -t hugegraph/vermeer . 
+``` + +Create a custom bridge network (one-time operation): + +```shell +docker network create --driver bridge \ + --subnet 172.20.0.0/24 \ + vermeer_network +``` + +Run master (adjust `CONFIG_DIR` to your **absolute** configuration path, and you can adjust the IP as needed based on your actual situation). + +```shell +CONFIG_DIR=/home/user/config + +docker run -d \ + --name vermeer-master \ + --network vermeer_network --ip 172.20.0.10 \ + -v ${CONFIG_DIR}:/go/bin/config \ + hugegraph/vermeer \ + --env=master +``` + +Run worker: + +```shell +docker run -d \ + --name vermeer-worker \ + --network vermeer_network --ip 172.20.0.11 \ + -v ${CONFIG_DIR}:/go/bin/config \ + hugegraph/vermeer \ + --env=worker +``` + +View logs / Stop / Remove: + +```shell +docker logs -f vermeer-master +docker logs -f vermeer-worker + +docker stop vermeer-master vermeer-worker +docker rm vermeer-master vermeer-worker + +# Remove the custom network (if needed) +docker network rm vermeer_network +``` + +3. **Option 3: Build from Source** + +Build. You can refer[Vermeer Readme](https://github.com/apache/incubator-hugegraph-computer/tree/master/vermeer). + +```shell +go build +``` + Enter the directory and input `./vermeer --env=master` or `./vermeer --env=worker01`. ## 2. Task Creation REST API @@ -33,7 +171,11 @@ Available URLs are as follows: Refer to the Vermeer parameter list document for specific parameters. -Request example: +Vermeer provides three ways to load data: + +1. 
Load from Local Files + +**Request Example:** ```javascript POST http://localhost:8688/tasks/create @@ -41,16 +183,67 @@ POST http://localhost:8688/tasks/create "task_type": "load", "graph": "testdb", "params": { - "load.parallel": "50", - "load.type": "local", - "load.vertex_files": "{\"localhost\":\"data/twitter-2010.v_[0,99]\"}", - "load.edge_files": "{\"localhost\":\"data/twitter-2010.e_[0,99]\"}", - "load.use_out_degree": "1", - "load.use_outedge": "1" + "load.parallel": "50", + "load.type": "local", + "load.vertex_files": "{\"localhost\":\"data/twitter-2010.v_[0,99]\"}", + "load.edge_files": "{\"localhost\":\"data/twitter-2010.e_[0,99]\"}", + "load.use_out_degree": "1", + "load.use_outedge": "1" } } ``` +2. Load from HugeGraph + +**Request Example:** + +⚠️ Security Warning: Never store real passwords in configuration files or code. Use environment variables or a secure credential management system instead. + +```javascript +POST http://localhost:8688/tasks/create +{ + "task_type": "load", + "graph": "testdb", + "params": { + "load.parallel": "50", + "load.type": "hugegraph", + "load.hg_pd_peers": "[\":8686\"]", + "load.hugegraph_name": "DEFAULT/hugegraph2/g", + "load.hugegraph_username": "admin", + "load.hugegraph_password": "", + "load.use_out_degree": "1", + "load.use_outedge": "1" + } +} +``` + +3. 
Load from HDFS + +**Request Example:** + +```javascript +POST http://localhost:8688/tasks/create +{ + "task_type": "load", + "graph": "testdb", + "params": { + "load.parallel": "50", + "load.type": "hdfs", + "load.hdfs_namenode": "name_node1:9000", + "load.hdfs_conf_path": "/path/to/conf", + "load.krb_realm": "EXAMPLE.COM", + "load.krb_name": "user@EXAMPLE.COM", + "load.krb_keytab_path": "/path/to/keytab", + "load.krb_conf_path": "/path/to/krb5.conf", + "load.hdfs_use_krb": "1", + "load.vertex_files": "/data/graph/vertices", + "load.edge_files": "/data/graph/edges", + "load.use_out_degree": "1", + "load.use_outedge": "1" + } +} +``` + ### 2.3 Output Computation Results All Vermeer computation tasks support multiple result output methods, which can be customized: local, hdfs, afs, or hugegraph. Add the corresponding parameters under the params parameter when sending the request to take effect. When output.need_statistics is set to 1, it supports outputting statistical information of the computation results, which will be written in the interface task information. The statistical mode operators currently support "count" and "modularity," but only for community detection algorithms. @@ -66,13 +259,13 @@ POST http://localhost:8688/tasks/create "graph": "testdb", "params": { "compute.algorithm": "pagerank", - "compute.parallel":"10", - "compute.max_step":"10", - "output.type":"local", - "output.parallel":"1", - "output.file_path":"result/pagerank" + "compute.parallel": "10", + "compute.max_step": "10", + "output.type": "local", + "output.parallel": "1", + "output.file_path": "result/pagerank" } - } +} ``` ## 3. 
Supported Algorithms From ea90243bc1e34971fc7c21b66875c6f021aff377 Mon Sep 17 00:00:00 2001 From: Jingkai Yang Date: Wed, 19 Nov 2025 14:15:16 +0800 Subject: [PATCH 11/19] chore: minor test (#430) --- content/cn/docs/quickstart/computing/hugegraph-vermeer.md | 3 ++- content/en/docs/quickstart/computing/hugegraph-vermeer.md | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/content/cn/docs/quickstart/computing/hugegraph-vermeer.md b/content/cn/docs/quickstart/computing/hugegraph-vermeer.md index e739e110e..c16eba7f0 100644 --- a/content/cn/docs/quickstart/computing/hugegraph-vermeer.md +++ b/content/cn/docs/quickstart/computing/hugegraph-vermeer.md @@ -19,6 +19,7 @@ master 是负责通信、转发、汇总的节点,计算量和占用资源量 1. **方案一:Docker Compose(推荐)** 确保docker-compose.yaml存在于您的项目根目录中。如果没有,以下是一个示例: + ```yaml # # Licensed to the Apache Software Foundation (ASF) under one or more @@ -146,7 +147,7 @@ docker network rm vermeer_network 3. **方案三:从源码构建** -构建。具体请参照[Vermeer Readme](https://github.com/apache/incubator-hugegraph-computer/tree/master/vermeer)。 +构建。具体请参照 [Vermeer Readme](https://github.com/apache/incubator-hugegraph-computer/tree/master/vermeer)。 ```shell go build diff --git a/content/en/docs/quickstart/computing/hugegraph-vermeer.md b/content/en/docs/quickstart/computing/hugegraph-vermeer.md index b0a96027f..03ca9f538 100644 --- a/content/en/docs/quickstart/computing/hugegraph-vermeer.md +++ b/content/en/docs/quickstart/computing/hugegraph-vermeer.md @@ -148,7 +148,7 @@ docker network rm vermeer_network 3. **Option 3: Build from Source** -Build. You can refer[Vermeer Readme](https://github.com/apache/incubator-hugegraph-computer/tree/master/vermeer). +Build. You can refer [Vermeer Readme](https://github.com/apache/incubator-hugegraph-computer/tree/master/vermeer). 
```shell go build From e4660a64edf297f42e4f07db545d98ef2faf4074 Mon Sep 17 00:00:00 2001 From: Jingkai Yang Date: Wed, 19 Nov 2025 15:15:54 +0800 Subject: [PATCH 12/19] docs: delete licsence/add link of twitter (#431) --- .../quickstart/computing/hugegraph-vermeer.md | 19 ++----------------- .../quickstart/computing/hugegraph-vermeer.md | 19 ++----------------- 2 files changed, 4 insertions(+), 34 deletions(-) diff --git a/content/cn/docs/quickstart/computing/hugegraph-vermeer.md b/content/cn/docs/quickstart/computing/hugegraph-vermeer.md index c16eba7f0..bae1aa93a 100644 --- a/content/cn/docs/quickstart/computing/hugegraph-vermeer.md +++ b/content/cn/docs/quickstart/computing/hugegraph-vermeer.md @@ -21,23 +21,6 @@ master 是负责通信、转发、汇总的节点,计算量和占用资源量 确保docker-compose.yaml存在于您的项目根目录中。如果没有,以下是一个示例: ```yaml -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - services: vermeer-master: image: hugegraph/vermeer @@ -174,6 +157,8 @@ vermeer提供三种加载方式: 1. 
从本地加载 +可以预先获取数据集,例如 twitter-2010 数据集。获取方式:https://snap.stanford.edu/data/twitter-2010.html,第一个 twitter-2010.txt.gz 即可。 + **request 示例:** ```javascript diff --git a/content/en/docs/quickstart/computing/hugegraph-vermeer.md b/content/en/docs/quickstart/computing/hugegraph-vermeer.md index 03ca9f538..3d2aad604 100644 --- a/content/en/docs/quickstart/computing/hugegraph-vermeer.md +++ b/content/en/docs/quickstart/computing/hugegraph-vermeer.md @@ -21,23 +21,6 @@ The framework's runtime configuration can be passed via command-line parameters Please ensure that `docker-compose.yaml` exists in your project root directory. If it doesn't, here is an example: ```yaml -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements.  See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License.  You may obtain a copy of the License at -# -#    http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - services:   vermeer-master:     image: hugegraph/vermeer @@ -175,6 +158,8 @@ Vermeer provides three ways to load data: 1. Load from Local Files +You can obtain the dataset in advance, such as the Twitter-2010 dataset. Acquisition method: https://snap.stanford.edu/data/twitter-2010.html The first Twitter-2010.text.gz is sufficient. 
+ **Request Example:** ```javascript From 34bfab56b42974007b477c2e7f2129dcf6bb9fa0 Mon Sep 17 00:00:00 2001 From: Jingkai Yang Date: Thu, 20 Nov 2025 16:12:54 +0800 Subject: [PATCH 13/19] chore: add deepwiki notes in docs (#432) --- content/cn/docs/quickstart/computing/_index.md | 6 ++++++ content/cn/docs/quickstart/hugegraph-ai/_index.md | 6 ++++++ content/cn/docs/quickstart/hugegraph/_index.md | 6 ++++++ content/cn/docs/quickstart/toolchain/_index.md | 6 ++++++ content/en/docs/quickstart/computing/_index.md | 6 ++++++ content/en/docs/quickstart/hugegraph-ai/_index.md | 6 ++++++ content/en/docs/quickstart/hugegraph/_index.md | 6 ++++++ content/en/docs/quickstart/toolchain/_index.md | 6 ++++++ 8 files changed, 48 insertions(+) diff --git a/content/cn/docs/quickstart/computing/_index.md b/content/cn/docs/quickstart/computing/_index.md index 068e1956c..8777af8c9 100644 --- a/content/cn/docs/quickstart/computing/_index.md +++ b/content/cn/docs/quickstart/computing/_index.md @@ -3,3 +3,9 @@ title: "HugeGraph Computing (OLAP)" linkTitle: "HugeGraph Computing (OLAP)" weight: 4 --- + +## 🚀 最佳实践:优先使用 DeepWiki 智能文档 + +> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。 + +**👉 强烈推荐访问并对话:**[**incubator-hugegraph-computer**](https://deepwiki.com/apache/incubator-hugegraph-computer) \ No newline at end of file diff --git a/content/cn/docs/quickstart/hugegraph-ai/_index.md b/content/cn/docs/quickstart/hugegraph-ai/_index.md index ba2c97222..d5cb8adde 100644 --- a/content/cn/docs/quickstart/hugegraph-ai/_index.md +++ b/content/cn/docs/quickstart/hugegraph-ai/_index.md @@ -7,6 +7,12 @@ weight: 3 [![License](https://img.shields.io/badge/license-Apache%202-0E78BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html) [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/apache/incubator-hugegraph-ai) +## 🚀 最佳实践:优先使用 DeepWiki 智能文档 + +> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。 
+ +**👉 强烈推荐访问并对话:**[**incubator-hugegraph-ai**](https://deepwiki.com/apache/incubator-hugegraph-ai) + `hugegraph-ai` 整合了 [HugeGraph](https://github.com/apache/hugegraph) 与人工智能功能,为开发者构建 AI 驱动的图应用提供全面支持。 ## ✨ 核心功能 diff --git a/content/cn/docs/quickstart/hugegraph/_index.md b/content/cn/docs/quickstart/hugegraph/_index.md index 8d57d3f7b..f64d0adcf 100644 --- a/content/cn/docs/quickstart/hugegraph/_index.md +++ b/content/cn/docs/quickstart/hugegraph/_index.md @@ -3,3 +3,9 @@ title: "HugeGraph (OLTP)" linkTitle: "HugeGraph (OLTP)" weight: 1 --- + +## 🚀 最佳实践:优先使用 DeepWiki 智能文档 + +> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。 + +**👉 强烈推荐访问并对话:**[**incubator-hugegraph**](https://deepwiki.com/apache/incubator-hugegraph) \ No newline at end of file diff --git a/content/cn/docs/quickstart/toolchain/_index.md b/content/cn/docs/quickstart/toolchain/_index.md index 8033e0e72..776d935b2 100644 --- a/content/cn/docs/quickstart/toolchain/_index.md +++ b/content/cn/docs/quickstart/toolchain/_index.md @@ -5,3 +5,9 @@ weight: 2 --- > **测试指南**:如需在本地运行工具链测试,请参考 [HugeGraph 工具链本地测试指南](/cn/docs/guides/toolchain-local-test) + +## 🚀 最佳实践:优先使用 DeepWiki 智能文档 + +> 为解决静态文档可能过时的问题,我们提供了 **实时更新、内容更全面** 的 DeepWiki。它相当于一个拥有项目最新知识的专家,非常适合**所有开发者**在开始项目前阅读和咨询。 + +**👉 强烈推荐访问并对话:**[**incubator-hugegraph-toolchain**](https://deepwiki.com/apache/incubator-hugegraph-toolchain) diff --git a/content/en/docs/quickstart/computing/_index.md b/content/en/docs/quickstart/computing/_index.md index 068e1956c..5ec200bb5 100644 --- a/content/en/docs/quickstart/computing/_index.md +++ b/content/en/docs/quickstart/computing/_index.md @@ -3,3 +3,9 @@ title: "HugeGraph Computing (OLAP)" linkTitle: "HugeGraph Computing (OLAP)" weight: 4 --- + +## 🚀 Best practice: Prioritize using DeepWiki intelligent documents + +> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. 
It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project. + +**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph-computer**](https://deepwiki.com/apache/incubator-hugegraph-computer) \ No newline at end of file diff --git a/content/en/docs/quickstart/hugegraph-ai/_index.md b/content/en/docs/quickstart/hugegraph-ai/_index.md index 1095042b4..4fb5e9df4 100644 --- a/content/en/docs/quickstart/hugegraph-ai/_index.md +++ b/content/en/docs/quickstart/hugegraph-ai/_index.md @@ -7,6 +7,12 @@ weight: 3 [![License](https://img.shields.io/badge/license-Apache%202-0E78BA.svg)](https://www.apache.org/licenses/LICENSE-2.0.html) [![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/apache/incubator-hugegraph-ai) +## 🚀 Best practice: Prioritize using DeepWiki intelligent documents + +> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project. + +**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph-ai**](https://deepwiki.com/apache/incubator-hugegraph-ai) + `hugegraph-ai` integrates [HugeGraph](https://github.com/apache/hugegraph) with artificial intelligence capabilities, providing comprehensive support for developers to build AI-powered graph applications. 
## ✨ Key Features diff --git a/content/en/docs/quickstart/hugegraph/_index.md b/content/en/docs/quickstart/hugegraph/_index.md index 8d57d3f7b..e35040e3f 100644 --- a/content/en/docs/quickstart/hugegraph/_index.md +++ b/content/en/docs/quickstart/hugegraph/_index.md @@ -3,3 +3,9 @@ title: "HugeGraph (OLTP)" linkTitle: "HugeGraph (OLTP)" weight: 1 --- + +## 🚀 Best practice: Prioritize using DeepWiki intelligent documents + +> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project. + +**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph**](https://deepwiki.com/apache/incubator-hugegraph) diff --git a/content/en/docs/quickstart/toolchain/_index.md b/content/en/docs/quickstart/toolchain/_index.md index 6c06a88cf..5c7508230 100644 --- a/content/en/docs/quickstart/toolchain/_index.md +++ b/content/en/docs/quickstart/toolchain/_index.md @@ -5,3 +5,9 @@ weight: 2 --- > **Testing Guide**: For running toolchain tests locally, please refer to [HugeGraph Toolchain Local Testing Guide](/docs/guides/toolchain-local-test) + +## 🚀 Best practice: Prioritize using DeepWiki intelligent documents + +> To address the issue of outdated static documents, we provide DeepWiki with **real-time updates and more comprehensive content**. It is equivalent to an expert with the latest knowledge of the project, which is very suitable for **all developers** to read and consult before starting the project. 
+ +**👉 Strongly recommend visiting and having a conversation with:** [**incubator-hugegraph-toolchain**](https://deepwiki.com/apache/incubator-hugegraph-toolchain) \ No newline at end of file From 5840b8b77d5e437dbee9c82b7caa3cf2c69a2299 Mon Sep 17 00:00:00 2001 From: Peng Junzhi <78788603+Pengzna@users.noreply.github.com> Date: Wed, 26 Nov 2025 19:15:48 +0800 Subject: [PATCH 14/19] feat: add graphspace related doc (#425) * chore: update release workflow and .gitignore entries Bumped default release version to 1.7.0 and updated default GPG user in the release workflow. Added installation step for subversion on Ubuntu. Appended WARP.md to .gitignore. * refactor: unify release validation script and add local path support Deleted validate-release-in-local.sh and enhanced validate-release.sh to support both SVN and local directory validation. Added color-coded output, improved argument handling, and included Java version checks for better usability and error reporting. * refactor: revamp release validation script with enhanced checks V2 Major rewrite of validate-release.sh for Apache HugeGraph, adding modular structure, improved logging, error/warning collection, colorized output, and comprehensive validation steps for source and binary packages. New features include dependency checks, GPG key management, license compliance, file size and binary checks, version consistency, and automated server/toolchain testing. Usage instructions and help output are expanded for clarity. * refactor: enhance release validation for multi-arch and license checks V3 Updated CI workflow to support additional OS and architectures (arm64, macOS 14). Improved documentation and script usage instructions. The license header check now covers more file types and excludes generated/vendor files. Maven build commands in docs and scripts now use '-DskipTests' and '-Dcheckstyle.skip=true' for consistency. Added a detailed README for the release validation script. 
* refactor: improve validation script error context and reporting V4 Adds contextual error and warning reporting with step and package information, enhances license category and header checks, improves version consistency logic, and refines summary output with execution time and clearer formatting. These changes make validation results more actionable and easier to interpret, especially for multi-package and multi-step validations. * fix: add JSON to CATEGORY_X license validation The JSON license was added to the CATEGORY_X regex in the binary package validation step to ensure packages with this license are properly flagged during release validation. * introduce new version of validation * Update GPG username and regex in workflow --------- Co-authored-by: imbajin --- content/cn/docs/clients/restful-api/_index.md | 8 +- content/cn/docs/clients/restful-api/auth.md | 237 +++++-- content/cn/docs/clients/restful-api/cypher.md | 32 +- content/cn/docs/clients/restful-api/edge.md | 52 +- .../cn/docs/clients/restful-api/edgelabel.md | 31 +- content/cn/docs/clients/restful-api/graphs.md | 114 +++- .../cn/docs/clients/restful-api/graphspace.md | 281 ++++++++ .../cn/docs/clients/restful-api/gremlin.md | 26 +- .../cn/docs/clients/restful-api/indexlabel.md | 22 +- .../docs/clients/restful-api/propertykey.md | 26 +- content/cn/docs/clients/restful-api/rank.md | 31 +- .../cn/docs/clients/restful-api/rebuild.md | 22 +- content/cn/docs/clients/restful-api/schema.md | 6 +- content/cn/docs/clients/restful-api/task.md | 8 +- .../cn/docs/clients/restful-api/traverser.md | 614 +++++++++--------- .../cn/docs/clients/restful-api/variable.md | 12 +- content/cn/docs/clients/restful-api/vertex.md | 158 +++-- .../docs/clients/restful-api/vertexlabel.md | 37 +- .../quickstart/hugegraph/hugegraph-server.md | 16 +- content/en/docs/clients/restful-api/_index.md | 10 +- content/en/docs/clients/restful-api/auth.md | 300 +++++++-- content/en/docs/clients/restful-api/cypher.md | 39 +- 
content/en/docs/clients/restful-api/edge.md | 44 +- .../en/docs/clients/restful-api/edgelabel.md | 12 +- content/en/docs/clients/restful-api/graphs.md | 187 ++++-- .../en/docs/clients/restful-api/graphspace.md | 281 ++++++++ .../en/docs/clients/restful-api/gremlin.md | 6 +- .../en/docs/clients/restful-api/indexlabel.md | 10 +- .../docs/clients/restful-api/propertykey.md | 10 +- content/en/docs/clients/restful-api/rank.md | 4 +- .../en/docs/clients/restful-api/rebuild.md | 12 +- content/en/docs/clients/restful-api/schema.md | 4 +- content/en/docs/clients/restful-api/task.md | 8 +- .../en/docs/clients/restful-api/traverser.md | 14 +- .../en/docs/clients/restful-api/variable.md | 8 +- content/en/docs/clients/restful-api/vertex.md | 26 +- .../docs/clients/restful-api/vertexlabel.md | 12 +- .../quickstart/hugegraph/hugegraph-server.md | 25 +- 38 files changed, 1937 insertions(+), 808 deletions(-) create mode 100644 content/cn/docs/clients/restful-api/graphspace.md create mode 100644 content/en/docs/clients/restful-api/graphspace.md diff --git a/content/cn/docs/clients/restful-api/_index.md b/content/cn/docs/clients/restful-api/_index.md index bd52c92fb..afdd9d830 100644 --- a/content/cn/docs/clients/restful-api/_index.md +++ b/content/cn/docs/clients/restful-api/_index.md @@ -4,8 +4,12 @@ linkTitle: "RESTful API" weight: 1 --- -HugeGraph-Server通过HugeGraph-API基于HTTP协议为Client提供操作图的接口,主要包括元数据和 -图数据的增删改查,遍历算法,变量,图操作及其他操作。 +> ⚠️ **版本兼容性说明** +> +> - HugeGraph 1.7.0+ 引入了图空间功能,API 路径格式为:`/graphspaces/{graphspace}/graphs/{graph}` +> - HugeGraph 1.5.x 及之前版本使用旧路径:`/graphs/{graph}`, 以及创建/克隆图的 api 使用 text/plain 作为 Content-Type, 1.7.0 及之后使用 json +> - 默认图空间名称为 `DEFAULT`,可直接使用 +> - 旧版本 doc 参考:[HugeGraph 1.5.x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0) 除了下方的文档,你还可以通过 `localhost:8080/swagger-ui/index.html` 访问 `swagger-ui` 以查看 `RESTful API`。[示例可以参考此处](/cn/docs/quickstart/hugegraph/hugegraph-server#swaggerui-example) diff --git 
a/content/cn/docs/clients/restful-api/auth.md b/content/cn/docs/clients/restful-api/auth.md index cee6c3091..606b4e5c0 100644 --- a/content/cn/docs/clients/restful-api/auth.md +++ b/content/cn/docs/clients/restful-api/auth.md @@ -22,6 +22,7 @@ city: Beijing}) ##### 接口说明: 用户认证与权限控制接口包括 5 类:UserAPI、GroupAPI、TargetAPI、BelongAPI、AccessAPI。 +**注意**: 1.5.0 及之前,group/target 等 id 的格式类似 -69:grant,1.7.0 及之后,id 和 name 一致,如 admin [HugeGraph 1.5.x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0) ### 10.2 用户(User)API 用户接口包括:创建用户,删除用户,修改用户,和查询用户相关信息接口。 @@ -52,7 +53,7 @@ city: Beijing}) ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/users +POST http://localhost:8080/graphspaces/DEFAULT/auth/users ``` ##### Response Status @@ -71,7 +72,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/users "user_name": "boss", "user_creator": "admin", "user_phone": "182****9088", - "id": "-63:boss", + "id": "boss", "user_create": "2020-11-17 14:31:07.833" } ``` @@ -86,7 +87,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/users ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/users/-63:test +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/users/test ``` ##### Response Status @@ -110,7 +111,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/users/-63:test ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/users/-63:test +PUT http://localhost:8080/graphspaces/DEFAULT/auth/users/test ``` ##### Request Body @@ -138,7 +139,7 @@ PUT http://localhost:8080/graphs/hugegraph/auth/users/-63:test "user_name": "test", "user_creator": "admin", "user_phone": "183****9266", - "id": "-63:test", + "id": "test", "user_create": "2020-11-12 10:27:13.601" } ``` @@ -153,7 +154,7 @@ PUT http://localhost:8080/graphs/hugegraph/auth/users/-63:test ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/users +GET http://localhost:8080/graphspaces/DEFAULT/auth/users ``` ##### Response 
Status @@ -172,7 +173,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users "user_update": "2020-11-11 11:41:12.254", "user_name": "admin", "user_creator": "system", - "id": "-63:admin", + "id": "admin", "user_create": "2020-11-11 11:41:12.254" } ] @@ -188,7 +189,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/users/-63:admin +GET http://localhost:8080/graphspaces/DEFAULT/auth/users/admin ``` ##### Response Status @@ -207,7 +208,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users/-63:admin "user_update": "2020-11-11 11:41:12.254", "user_name": "admin", "user_creator": "system", - "id": "-63:admin", + "id": "admin", "user_create": "2020-11-11 11:41:12.254" } ] @@ -219,7 +220,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users/-63:admin ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/users/-63:boss/role +GET http://localhost:8080/graphspaces/DEFAULT/auth/users/boss/role ``` ##### Response Status @@ -270,7 +271,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users/-63:boss/role ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/groups +POST http://localhost:8080/graphspaces/DEFAULT/auth/groups ``` ##### Response Status @@ -302,7 +303,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/groups ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/groups/-69:grant ``` ##### Response Status @@ -326,7 +327,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant +PUT http://localhost:8080/graphspaces/DEFAULT/auth/groups/-69:grant ``` ##### Request Body @@ -366,7 +367,7 @@ PUT http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/groups +GET 
http://localhost:8080/graphspaces/DEFAULT/auth/groups ``` ##### Response Status @@ -401,7 +402,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/groups ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/groups/-69:all +GET http://localhost:8080/graphspaces/DEFAULT/auth/groups/-69:all ``` ##### Response Status @@ -464,7 +465,7 @@ target_resources 可以包括多个 target_resource,以列表的形式存储 ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/targets +POST http://localhost:8080/graphspaces/DEFAULT/auth/targets ``` ##### Response Status @@ -504,7 +505,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/targets ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/targets/-77:gremlin ``` ##### Response Status @@ -529,7 +530,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin +PUT http://localhost:8080/graphspaces/DEFAULT/auth/targets/-77:gremlin ``` ##### Request Body @@ -583,7 +584,7 @@ PUT http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/targets +GET http://localhost:8080/graphspaces/DEFAULT/auth/targets ``` ##### Response Status @@ -642,7 +643,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/targets ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/targets/-77:grant +GET http://localhost:8080/graphspaces/DEFAULT/auth/targets/-77:grant ``` ##### Response Status @@ -688,7 +689,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/targets/-77:grant ```json { - "user": "-63:boss", + "user": "boss", "group": "-69:all" } ``` @@ -697,7 +698,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/targets/-77:grant ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/belongs +POST 
http://localhost:8080/graphspaces/DEFAULT/auth/belongs ``` ##### Response Status @@ -713,8 +714,8 @@ POST http://localhost:8080/graphs/hugegraph/auth/belongs "belong_create": "2020-11-11 16:19:35.422", "belong_creator": "admin", "belong_update": "2020-11-11 16:19:35.422", - "id": "S-63:boss>-82>>S-69:all", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:all", + "user": "boss", "group": "-69:all" } ``` @@ -728,7 +729,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/belongs ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:grant +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/belongs/Sboss>-82>>S-69:grant ``` ##### Response Status @@ -753,7 +754,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:g ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:grant +PUT http://localhost:8080/graphspaces/DEFAULT/auth/belongs/Sboss>-82>>S-69:grant ``` ##### Request Body @@ -778,8 +779,8 @@ PUT http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:gran "belong_create": "2020-11-12 10:40:21.720", "belong_creator": "admin", "belong_update": "2020-11-12 10:42:47.265", - "id": "S-63:boss>-82>>S-69:grant", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:grant", + "user": "boss", "group": "-69:grant" } ``` @@ -794,7 +795,7 @@ PUT http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:gran ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/belongs +GET http://localhost:8080/graphspaces/DEFAULT/auth/belongs ``` ##### Response Status @@ -812,8 +813,8 @@ GET http://localhost:8080/graphs/hugegraph/auth/belongs "belong_create": "2020-11-11 16:19:35.422", "belong_creator": "admin", "belong_update": "2020-11-11 16:19:35.422", - "id": "S-63:boss>-82>>S-69:all", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:all", + "user": "boss", "group": "-69:all" } ] @@ -829,7 +830,7 @@ GET 
http://localhost:8080/graphs/hugegraph/auth/belongs ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:all +GET http://localhost:8080/graphspaces/DEFAULT/auth/belongs/Sboss>-82>>S-69:all ``` ##### Response Status @@ -845,8 +846,8 @@ GET http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:all "belong_create": "2020-11-11 16:19:35.422", "belong_creator": "admin", "belong_update": "2020-11-11 16:19:35.422", - "id": "S-63:boss>-82>>S-69:all", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:all", + "user": "boss", "group": "-69:all" } ``` @@ -883,7 +884,7 @@ access_permission: ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/accesses +POST http://localhost:8080/graphspaces/DEFAULT/auth/accesses ``` ##### Response Status @@ -916,7 +917,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/accesses ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77:all +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>12>S-77:all ``` ##### Response Status @@ -941,7 +942,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77 ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77:all +PUT http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>12>S-77:all ``` ##### Request Body @@ -982,7 +983,7 @@ PUT http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77:al ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/accesses +GET http://localhost:8080/graphspaces/DEFAULT/auth/accesses ``` ##### Response Status @@ -1018,7 +1019,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/accesses ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>11>S-77:all +GET http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>11>S-77:all ``` ##### Response 
Status @@ -1040,3 +1041,161 @@ GET http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>11>S-77:al "target": "-77:all" } ``` + +### 10.7 图空间管理员(Manager)API + +**重要提示**:在使用以下 API 之前,需要先创建图空间(graphspace)。请参考 [Graphspace API](../graphspace) 创建名为 `gs1` 的图空间。文档中的示例均假设已存在名为 `gs1` 的图空间 + +1. 图空间管理员 API 用于在 graphspace 维度给用户授予/回收管理员角色,并查询当前用户或其他用户在该 graphspace 下的角色信息。角色类型可取 `SPACE`、`SPACE_MEMBER`、`ADMIN` 。 + +#### 10.7.1 检查当前登录用户是否拥有某个角色 + +##### Params + +- type: 需要校验的角色类型,可选 + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1/auth/managers/check?type=WRITE +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +"true" +``` + +返回 `true/false` 字符串表示是否拥有对应角色。 + +#### 10.7.2 查询图空间管理员列表 + +##### Params + +- type: 角色类型,可选,按角色过滤 + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1/auth/managers?type=SPACE +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "managers": [ + { + "user": "admin", + "type": "SPACE", + "create_time": "2024-01-10 09:30:00" + } + ] +} +``` + +#### 10.7.3 授权/创建图空间管理员 + +- 下面在 gs1 下,将用户 boss 授权为 SPACE_MEMBER 角色 + +##### Request Body + +```json +{ + "user": "boss", + "type": "SPACE_MEMBER" +} +``` + +##### Method & Url + +``` +POST http://localhost:8080/graphspaces/gs1/auth/managers +``` + +##### Response Status + +```json +201 +``` + +##### Response Body + +```json +{ + "user": "boss", + "type": "SPACE_MEMBER", + "manager_creator": "admin", + "manager_create": "2024-01-10 09:45:12" +} +``` + +#### 10.7.4 取消图空间管理员权限 + +- 下面在 gs1 下,将用户 boss 的 SPACE_MEMBER 角色删除 + +##### Params + +- user: 需要删除的用户 Id +- type: 需要删除的角色类型 + +##### Method & Url + +``` +DELETE http://localhost:8080/graphspaces/gs1/auth/managers?user=boss&type=SPACE_MEMBER +``` + +##### Response Status + +```json +204 +``` + +##### Response Body + +```json +1 +``` + +#### 10.7.5 查询指定用户在图空间中的角色 + +##### Params + +- user: 用户 Id + +##### Method & Url + +``` +GET 
http://localhost:8080/graphspaces/gs1/auth/managers/role?user=boss +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "roles": { + "boss": [ + "READ", + "SPACE_MEMBER" + ] + } +} +``` diff --git a/content/cn/docs/clients/restful-api/cypher.md b/content/cn/docs/clients/restful-api/cypher.md index b3efdf630..7eddf199c 100644 --- a/content/cn/docs/clients/restful-api/cypher.md +++ b/content/cn/docs/clients/restful-api/cypher.md @@ -6,23 +6,31 @@ weight: 15 ### 9.1 Cypher -#### 9.1.1 向HugeGraphServer发送Cypher语句(GET),同步执行 +#### 9.1.1 向 HugeGraphServer 发送 Cypher 语句(GET),同步执行 ##### Method & Url ```javascript -GET /graphs/{graph}/cypher?cypher={cypher} +GET /graphspaces/{graphspace}/graphs/{graph}/cypher?cypher={cypher} ``` ##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 - graph: 图名称 -- cypher: cypher语句 + +**请求参数说明:** + +- cypher: cypher 语句 ##### 使用示例 ```javascript -GET http://localhost:8080/graphs/hugecypher1/cypher?cypher=match(n:person) return n.name as name order by n.name limit 1 +GET +http://localhost:8080/graphspaces/DEFAULT/graphs/hugecypher1/cypher?cypher=match(n:person) return n.name as name order by n.name limit 1 ``` ##### Response Status @@ -53,30 +61,36 @@ GET http://localhost:8080/graphs/hugecypher1/cypher?cypher=match(n:person) retur } ``` -#### 9.1.2 向HugeGraphServer发送Cypher语句(POST),同步执行 +#### 9.1.2 向 HugeGraphServer 发送 Cypher 语句(POST),同步执行 ##### Method & Url ```javascript -POST /graphs/{graph}/cypher +POST /graphspaces/{graphspace}/graphs/{graph}/cypher ``` ##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 - graph: 图名称 ##### Body {cypher} -- cypher: cypher语句 + +- cypher: cypher 语句 注意: -> 不是JSON格式,是纯文本的Cypher语句 +> 不是 JSON 格式,是纯文本的 Cypher 语句 ##### 使用示例 ```javascript -POST http://localhost:8080/graphs/hugecypher1/cypher +POST +http://localhost:8080/graphspaces/DEFAULT/graphs/hugecypher1/cypher ``` ###### Request Body diff --git a/content/cn/docs/clients/restful-api/edge.md 
b/content/cn/docs/clients/restful-api/edge.md index e164a142b..d17242a2b 100644 --- a/content/cn/docs/clients/restful-api/edge.md +++ b/content/cn/docs/clients/restful-api/edge.md @@ -61,6 +61,7 @@ g = graph.traversal() **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 **请求体说明:** @@ -76,8 +77,8 @@ g = graph.traversal() ##### Method & Url -``` -POST http://localhost:8080/graphs/hugegraph/graph/edges +```bash +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges ``` ##### Request Body @@ -126,6 +127,7 @@ POST http://localhost:8080/graphs/hugegraph/graph/edges **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 **请求参数说明:** @@ -138,8 +140,8 @@ POST http://localhost:8080/graphs/hugegraph/graph/edges ##### Method & Url -``` -POST http://localhost:8080/graphs/hugegraph/graph/edges/batch +```bash +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/batch ``` ##### Request Body @@ -192,6 +194,7 @@ POST http://localhost:8080/graphs/hugegraph/graph/edges/batch **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 - id:待操作的边 id @@ -205,8 +208,8 @@ POST http://localhost:8080/graphs/hugegraph/graph/edges/batch ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=append +```http +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=append ``` ##### Request Body @@ -251,6 +254,7 @@ PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 **请求体说明:** @@ -267,8 +271,8 @@ PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action ##### Method & Url -``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/edges/batch +```http +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/batch ``` ##### Request Body @@ -355,6 +359,7 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/edges/batch **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 - id:待操作的边 
id @@ -368,8 +373,8 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/edges/batch ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=eliminate +```http +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=eliminate ``` ##### Request Body @@ -408,6 +413,7 @@ PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 **请求参数说明:** @@ -442,8 +448,8 @@ PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action ##### Method & Url -``` -GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?vertex_id="1:marko"&label=knows&properties={"date":"P.within(\"20160111\")"} +```http +GET http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges?vertex_id="1:marko"&label=knows&properties={"date":"P.within(\"20160111\")"} ``` ##### Response Status @@ -478,8 +484,8 @@ GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?vertex_id="1:marko"&label ##### Method & Url -``` -GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?page&limit=2 +```http +GET http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges?page&limit=2 ``` ##### Response Status @@ -530,8 +536,8 @@ GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?page&limit=2 ##### Method & Url -``` -GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?page=EoYxOm1hcmtvgggCAIQyOmxvcAAAAAAAAAAC&limit=2 +```http +GET http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges?page=EoYxOm1hcmtvgggCAIQyOmxvcAAAAAAAAAAC&limit=2 ``` ##### Response Status @@ -573,13 +579,14 @@ GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?page=EoYxOm1hcmtvgggCAIQy **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 - id:待操作的边 id ##### Method & Url -``` -GET http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop +```http +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ``` ##### Response Status @@ -612,6 +619,7 @@ GET http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop **路径参数说明:** +- graphspace: 图空间名称 - graph:待操作的图 - id:待操作的边 id @@ -623,8 +631,8 @@ GET http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ##### Method & Url -``` -DELETE http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop +```http +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ``` ##### Response Status @@ -639,8 +647,8 @@ DELETE http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ##### Method & Url -``` -DELETE http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>1>>S1:vadas?label=knows +```http +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>1>>S1:vadas?label=knows ``` ##### Response Status diff --git a/content/cn/docs/clients/restful-api/edgelabel.md b/content/cn/docs/clients/restful-api/edgelabel.md index 5e767bfd1..145992c22 100644 --- a/content/cn/docs/clients/restful-api/edgelabel.md +++ b/content/cn/docs/clients/restful-api/edgelabel.md @@ -6,26 +6,25 @@ weight: 4 ### 1.4 EdgeLabel -假设已经创建好了1.2.3中的 PropertyKeys 和 1.3.3中的 VertexLabels +假设已经创建好了 1.2.3 中的 PropertyKeys 和 1.3.3 中的 VertexLabels -Params说明 +Params 说明 - name:顶点类型名称,必填 - source_label: 源顶点类型的名称,必填 - target_label: 目标顶点类型的名称,必填 -- frequency:两个点之间是否可以有多条边,可以取值SINGLE和MULTIPLE,非必填,默认值SINGLE +- frequency:两个点之间是否可以有多条边,可以取值 SINGLE 和 MULTIPLE,非必填,默认值 SINGLE - properties: 边类型关联的属性类型,选填 - sort_keys: 当允许关联多次时,指定区分键属性列表 - nullable_keys:可为空的属性,选填,默认可为空 -- enable_label_index: 是否开启类型索引,默认关闭 +- enable_label_index:是否开启类型索引,默认关闭 - -#### 1.4.1 创建一个EdgeLabel +#### 1.4.1 创建一个 EdgeLabel ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/schema/edgelabels +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels ``` ##### Request 
Body @@ -125,7 +124,7 @@ POST http://localhost:8080/graphs/hugegraph/schema/edgelabels } ``` -#### 1.4.2 为已存在的EdgeLabel添加properties或userdata,或者移除userdata(目前不支持移除properties) +#### 1.4.2 为已存在的 EdgeLabel 添加 properties 或 userdata,或者移除 userdata(目前不支持移除 properties) ##### Params @@ -134,7 +133,7 @@ POST http://localhost:8080/graphs/hugegraph/schema/edgelabels ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/schema/edgelabels/created?action=append +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels/created?action=append ``` ##### Request Body @@ -182,12 +181,12 @@ PUT http://localhost:8080/graphs/hugegraph/schema/edgelabels/created?action=appe } ``` -#### 1.4.3 获取所有的EdgeLabel +#### 1.4.3 获取所有的 EdgeLabel ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/edgelabels +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels ``` ##### Response Status @@ -244,12 +243,12 @@ GET http://localhost:8080/graphs/hugegraph/schema/edgelabels } ``` -#### 1.4.4 根据name获取EdgeLabel +#### 1.4.4 根据 name 获取 EdgeLabel ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/edgelabels/created +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels/created ``` ##### Response Status @@ -285,14 +284,14 @@ GET http://localhost:8080/graphs/hugegraph/schema/edgelabels/created } ``` -#### 1.4.5 根据name删除EdgeLabel +#### 1.4.5 根据 name 删除 EdgeLabel 删除 EdgeLabel 会导致删除对应的边以及相关的索引数据,会产生一个异步任务 ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/edgelabels/created +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels/created ``` ##### Response Status @@ -311,4 +310,4 @@ DELETE http://localhost:8080/graphs/hugegraph/schema/edgelabels/created 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/1`(其中"1"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) \ No newline at end of file +> 可以通过`GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) diff --git a/content/cn/docs/clients/restful-api/graphs.md b/content/cn/docs/clients/restful-api/graphs.md index 9e1b8a8d7..5229a9432 100644 --- a/content/cn/docs/clients/restful-api/graphs.md +++ b/content/cn/docs/clients/restful-api/graphs.md @@ -6,12 +6,20 @@ weight: 12 ### 6.1 Graphs -#### 6.1.1 列出数据库中全部的图 +**重要提醒**:1.7.0 及之后,动态创建图必须开启鉴权模式。非鉴权模式请参考[图配置文件](https://hugegraph.apache.org/cn/docs/config/config-guide/#4-hugegraphproperties),通过配置文件静态创建图。 + +#### 6.1.1 列出图空间中全部的图 + +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 ##### Method & Url ``` -GET http://localhost:8080/graphs +GET http://localhost:8080/graphspaces/DEFAULT/graphs ``` ##### Response Status @@ -33,10 +41,17 @@ GET http://localhost:8080/graphs #### 6.1.2 查看某个图的信息 +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph ``` ##### Response Status @@ -58,6 +73,13 @@ GET http://localhost:8080/graphs/hugegraph ##### Params +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + +**请求参数说明:** + 由于清空图是一个比较危险的操作,为避免用户误调用,我们给 API 添加了用于确认的参数: - confirm_message: 默认为`I'm sure to delete all data` @@ -65,7 +87,7 @@ GET http://localhost:8080/graphs/hugegraph ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/clear?confirm_message=I%27m+sure+to+delete+all+data +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/clear?confirm_message=I%27m+sure+to+delete+all+data ``` ##### Response Status @@ -78,25 +100,34 @@ DELETE http://localhost:8080/graphs/hugegraph/clear?confirm_message=I%27m+sure+t ##### Params +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 要创建的新图名称 + +**请求参数说明:** + - clone_graph_name: 已有图的名称;从已有的图来克隆,用户可选择传递配置文件,传递时将替换已有图中的配置; ##### Method & Url ``` -POST 
http://localhost:8080/graphs/hugegraph_clone?clone_graph_name=hugegraph +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?clone_graph_name=hugegraph ``` ##### Request Body (可选) -克隆 (fork) 一个无权限的新图 (body 类型必须设置为 `Context-Type=text/plain`) +克隆一个非鉴权模式的图(设置 `Content-Type: application/json`) -```properties -gremlin.graph=org.apache.hugegraph.HugeFactory -backend=rocksdb -serializer=binary -store=hugegraph_clone -rocksdb.data_path=./rks-data-xx -rocksdb.wal_path=./rks-data-xx +```json +{ + "gremlin.graph": "org.apache.hugegraph.HugeFactory", + "backend": "rocksdb", + "serializer": "binary", + "store": "hugegraph_clone", + "rocksdb.data_path": "./rks-data-xx", + "rocksdb.wal_path": "./rks-data-xx" +} ``` > Note: @@ -120,23 +151,32 @@ rocksdb.wal_path=./rks-data-xx #### 6.1.5 创建一个图,**该操作需要管理员权限** +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph-xx +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph-xx ``` ##### Request Body -新建一个无权限的新图 (body 类型必须设置为 `Context-Type=text/plain`) +创建一个非鉴权模式的图(设置 `Content-Type: application/json`) -```properties -gremlin.graph=org.apache.hugegraph.HugeFactory -backend=rocksdb -serializer=binary -store=hugegraph2 -rocksdb.data_path=./rks-data-xx -rocksdb.wal_path=./rks-data-xx +```json +{ + "gremlin.graph": "org.apache.hugegraph.HugeFactory", + "backend": "rocksdb", + "serializer": "binary", + "store": "hugegraph", + "rocksdb.data_path": "./rks-data-xx", + "rocksdb.wal_path": "./rks-data-xx" +} ``` > Note: @@ -153,7 +193,7 @@ rocksdb.wal_path=./rks-data-xx ```javascript { - "name": "hugegraph2", + "name":"hugegraph2", "backend": "rocksdb" } ``` @@ -162,6 +202,13 @@ rocksdb.wal_path=./rks-data-xx ##### Params +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + +**请求参数说明:** + 由于删除图是一个比较危险的操作,为避免用户误调用,我们给 API 添加了用于确认的参数: - confirm_message: 默认为`I'm sure to drop the graph` @@ -169,7 +216,7 @@ rocksdb.wal_path=./rks-data-xx ##### 
Method & Url ```javascript -DELETE http://localhost:8080/graphs/hugegraph_clone?confirm_message=I%27m%20sure%20to%20drop%20the%20graph +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?confirm_message=I%27m%20sure%20to%20drop%20the%20graph ``` ##### Response Status @@ -178,6 +225,8 @@ DELETE http://localhost:8080/graphs/hugegraph_clone?confirm_message=I%27m%20sure 204 ``` +> 注意:对于 HugeGraph 1.5.0 及之前版本,如需创建或删除图,请继续使用旧的 `text/plain`(properties)格式请求体,而不是 JSON。 + ### 6.2 Conf #### 6.2.1 查看某个图的配置,**该操作需要管理员权限** @@ -185,7 +234,8 @@ DELETE http://localhost:8080/graphs/hugegraph_clone?confirm_message=I%27m%20sure ##### Method & Url ```javascript -GET http://localhost:8080/graphs/hugegraph/conf +GET +http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/conf ``` ##### Response Status @@ -243,7 +293,7 @@ Restore 时存在两种不同的模式:Restoring 和 Merging ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/mode +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/mode ``` ##### Response Status @@ -267,7 +317,7 @@ GET http://localhost:8080/graphs/hugegraph/mode ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/mode +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/mode ``` ##### Request Body @@ -301,7 +351,7 @@ PUT http://localhost:8080/graphs/hugegraph/mode ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph_read_mode +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph_read_mode ``` ##### Response Status @@ -327,7 +377,7 @@ GET http://localhost:8080/graphs/hugegraph/graph_read_mode ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/graph_read_mode +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph_read_mode ``` ##### Request Body @@ -363,7 +413,7 @@ PUT http://localhost:8080/graphs/hugegraph/graph_read_mode ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/snapshot_create +PUT 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/snapshot_create ``` ##### Response Status @@ -389,7 +439,7 @@ PUT http://localhost:8080/graphs/hugegraph/snapshot_create ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/snapshot_resume +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/snapshot_resume ``` ##### Response Status @@ -417,7 +467,7 @@ PUT http://localhost:8080/graphs/hugegraph/snapshot_resume ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/compact +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/compact ``` ##### Response Status diff --git a/content/cn/docs/clients/restful-api/graphspace.md b/content/cn/docs/clients/restful-api/graphspace.md new file mode 100644 index 000000000..62231b3be --- /dev/null +++ b/content/cn/docs/clients/restful-api/graphspace.md @@ -0,0 +1,281 @@ +--- +title: "Graphspace API" +linkTitle: "Graphspace" +weight: 1 +description: "Graphspace(图空间)REST 接口:多租户与资源隔离的创建、查看、更新与删除,以及使用前置条件与限制。" +--- + +### 2.0 Graphspace + +在 HugeGraph 中,多租户是通过图空间(graph space)来实现的,资源的分配和隔离可以通过图空间进行。 + +**重要前置条件**: + +1. 目前图空间功能只支持在 hstore 模式下使用。 +2. 如果非 hstore 模式,则只能使用默认的图空间 `DEFAULT`,且不支持创建、删除和更新图空间的操作。 +3. 注意在 rest-server.properties 中,设置 `usePD=true`,并且 hugegraph.properties 中,设置 `backend=hstore` +4. 
图空间功能必须开启鉴权模式,默认账密为 admin:pa,请务必修改默认密码,防止未授权访问。 + +#### 2.0.1 创建一个图空间 + +##### Method & Url + +``` +POST http://localhost:8080/graphspaces +``` + +##### Request Body + +注意:目前 cpu,内存,以及 k8s 相关功能暂未开放 + +| 名称 | 是否必填 | 类型 | 默认值 | 取值范围 | 说明 | +|------------------------------|------|---------|-------|-----------------------------------|-----------------------------------------------------------------------| +| name | 是 | String | | 小写字母、数字和下划线组成,首字符必须是小写字母,长度不超过 48 | 图空间的名字 | +| description | 是 | String | | | 图空间的描述信息 | +| cpu_limit | 是 | Int | | > 0 | CPU 核数 | +| memory_limit | 是 | Int | | > 0 | 内存大小,单位 GB | +| storage_limit | 是 | Int | | > 0 | 图空间的数据占据的磁盘空间上限 | +| compute_cpu_limit | 否 | Int | 0 | >= 0 | 针对图计算的额外资源配置,单位 cores。当该字段不配置或者配置为 0 时,会由 cpu_limit 字段的值进行覆盖 | +| compute_memory_limit | 否 | Int | 0 | >= 0 | 针对图计算的额外内存配置,单位 GB。当该字段不配置或者配置为 0 时,会由 memory_limit 字段的值进行覆盖 | +| oltp_namespace | 是 | String | | | OLTP 的 k8s 命名空间 | +| olap_namespace | 是 | String | | | OLAP 的 k8s 命名空间。当 olap_namespace 和 oltp_namespace 的值相同时,其配置的资源限额会进行合并 | +| storage_namespace | 是 | String | | | 存储的 k8s 命名空间 | +| operator_image_path | 否 | String | | | 图计算 operator 的镜像地址:在创建图空间时,允许指定对应的图计算镜像并交由 K8S 进行统一管理 | +| internal_algorithm_image_url | 否 | String | | | 图计算的算法镜像地址:在创建图空间时,允许指定图计算的算法镜像并交由 K8S 进行统一管理 | +| max_graph_number | 是 | Int | | > 0 | 图空间的图数目的上限 | +| max_role_number | 是 | Int | | > 0 | 图空间的角色数目的上限 | +| auth | 否 | Boolean | false | true, false | 图空间是否支持权限认证 | +| configs | 否 | Map | | | 其他配置信息 | + +```json +{ + "name": "gs1", + "description": "1st graph space", + "max_graph_number": 100, + "cpu_limit": 1000, + "memory_limit": 8192, + "storage_limit": 1000000, + "max_role_number": 10, + "auth": true, + "configs": {} +} +``` + +##### Response Status + +```json +201 +``` + +##### Response Body + +```json +{ + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 1000, + "memory_limit": 8192, + "storage_limit": 1000000, + "compute_cpu_limit": 0, + "compute_memory_limit": 0, + 
"oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "storage_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "max_graph_number": 100, + "max_role_number": 10, + "cpu_used": 0, + "memory_used": 0, + "storage_used": 0, + "graph_number_used": 0, + "role_number_used": 0, + "auth": true +} +``` + +#### 2.0.2 列出系统所有图空间 + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "graphSpaces": [ + "gs1", + "DEFAULT" + ] +} +``` + +#### 2.0.3 查看某个图空间 + +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1 +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 1000, + "memory_limit": 8192, + "storage_limit": 1000000, + "oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "storage_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "compute_cpu_limit": 0, + "compute_memory_limit": 0, + "max_graph_number": 100, + "max_role_number": 10, + "cpu_used": 0, + "memory_used": 0, + "storage_used": 0, + "graph_number_used": 0, + "role_number_used": 0, + "auth": true +} +``` + +#### 2.0.4 更新某个图空间 + +> 注意:auth 鉴权配置,在创建图空间的过程一旦确定下来,不允许更新 + +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 + +**请求体说明:** + +- action: 标记本次操作为 Update 动作,取值固定为 "update" +- update: 即将更新的值,下述参数都应置于 update 中 + +| 名称 | 是否必填 | 类型 | 默认值 | 取值范围 | 说明 | 
+|------------------------------|------|--------|-----|------|-----------------------------------------------------------------------| +| name | 是 | String | | | 图空间名称 | +| description | 是 | String | | | 图空间的描述信息 | +| cpu_limit | 是 | Int | | > 0 | OLTP HugeGraphServer 的 CPU 核数 | +| memory_limit | 是 | Int | | > 0 | OLTP HugeGraphServer 的内存大小,单位 GB | +| storage_limit | 是 | Int | | > 0 | 图空间的数据占据的磁盘空间上限 | +| compute_cpu_limit | 否 | Int | 0 | >= 0 | 针对图计算的额外资源配置,单位 cores。当该字段不配置或者配置为 0 时,会由 cpu_limit 字段的值进行覆盖 | +| compute_memory_limit | 否 | Int | 0 | >= 0 | 针对图计算的额外内存配置,单位 GB。当该字段不配置或者配置为 0 时,会由 memory_limit 字段的值进行覆盖 | +| oltp_namespace | 是 | String | | | OLTP 的 k8s 命名空间 | +| olap_namespace | 是 | String | | | OLAP 的 k8s 命名空间。当 olap_namespace 和 oltp_namespace 的值相同时,其配置的资源限额会进行合并 | +| storage_namespace | 是 | String | | | 存储的 k8s 命名空间 | +| operator_image_path | 否 | String | | | 图计算 operator 的镜像地址:在更新图空间时,允许指定对应的图计算镜像并交由 K8S 进行统一管理 | +| internal_algorithm_image_url | 否 | String | | | 图计算的算法镜像地址:在更新图空间时,允许指定图计算的算法镜像并交由 K8S 进行统一管理 | +| max_graph_number | 是 | Int | | > 0 | 图空间的图数目的上限 | +| max_role_number | 是 | Int | | > 0 | 图空间的角色数目的上限 | + +##### Method & Url + +``` +PUT http://localhost:8080/graphspaces/gs1 +``` + +##### Request Body + +```json +{ + "action": "update", + "update": { + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 2000, + "memory_limit": 40960, + "storage_limit": 2048, + "oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "max_graph_number": 1000, + "max_role_number": 100 + } +} +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 2000, + "memory_limit": 40960, + "storage_limit": 2048, + "oltp_namespace": 
"hugegraph-server", + "olap_namespace": "hugegraph-server", + "storage_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "compute_cpu_limit": 0, + "compute_memory_limit": 0, + "max_graph_number": 1000, + "max_role_number": 100, + "cpu_used": 0, + "memory_used": 0, + "storage_used": 0, + "graph_number_used": 0, + "role_number_used": 0, + "auth": true +} +``` + +#### 2.0.5 删除某个图空间 + +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 + +##### Method & Url + +``` +DELETE http://localhost:8080/graphspaces/gs1 +``` + +##### Response Status + +```json +204 +``` + +> 注意:删除图空间,会导致图空间的全部资源被释放。 diff --git a/content/cn/docs/clients/restful-api/gremlin.md b/content/cn/docs/clients/restful-api/gremlin.md index f72a2efc5..d2affc3ae 100644 --- a/content/cn/docs/clients/restful-api/gremlin.md +++ b/content/cn/docs/clients/restful-api/gremlin.md @@ -6,12 +6,12 @@ weight: 14 ### 8.1 Gremlin -#### 8.1.1 向HugeGraphServer发送gremlin语句(GET),同步执行 +#### 8.1.1 向 HugeGraphServer 发送 gremlin 语句(GET),同步执行 ##### Params - gremlin: 要发送给`HugeGraphServer`执行的`gremlin`语句 -- bindings: 用来绑定参数,key是字符串,value是绑定的值(只能是字符串或者数字),功能类似于MySQL的 Prepared Statement,用于加速语句执行 +- bindings: 用来绑定参数,key 是字符串,value 是绑定的值(只能是字符串或者数字),功能类似于 MySQL 的 Prepared Statement,用于加速语句执行 - language: 发送语句的语言类型,默认为`gremlin-groovy` - aliases: 为存在于图空间的已有变量添加别名 @@ -64,7 +64,7 @@ GET http://127.0.0.1:8080/gremlin?gremlin=hugegraph.traversal().V('1:marko') } ``` -#### 8.1.2 向HugeGraphServer发送gremlin语句(POST),同步执行 +#### 8.1.2 向 HugeGraphServer 发送 gremlin 语句(POST),同步执行 ##### Method & Url @@ -129,11 +129,11 @@ POST http://localhost:8080/gremlin 注意: > 这里是直接使用图对象(hugegraph),先获取其遍历器(traversal()),再获取顶点。 -不能直接写成`graph.traversal().V()`或`g.V()`,可以通过`"aliases": {"graph": "hugegraph", "g": "__g_hugegraph"}` 
-为图和遍历器添加别名后使用别名操作。其中,`hugegraph`是原生存在的变量,`__g_hugegraph`是`HugeGraphServer`额外添加的变量, -每个图都会存在一个对应的这样格式(__g_${graph})的遍历器对象。 +> 不能直接写成`graph.traversal().V()`或`g.V()`,可以通过`"aliases": {"graph": "hugegraph", "g": "__g_hugegraph"}` +> 为图和遍历器添加别名后使用别名操作。其中,`hugegraph`是原生存在的变量,`__g_hugegraph`是`HugeGraphServer`额外添加的变量, +> 每个图都会存在一个对应的这样格式(__g_${graph})的遍历器对象。 -> 响应体的结构与其他 Vertex 或 Edge 的 RESTful API的结构有区别,用户可能需要自行解析。 +> 响应体的结构与其他 Vertex 或 Edge 的 RESTful API 的结构有区别,用户可能需要自行解析。 **查询边** @@ -186,12 +186,12 @@ POST http://localhost:8080/gremlin } ``` -#### 8.1.3 向HugeGraphServer发送gremlin语句(POST),异步执行 +#### 8.1.3 向 HugeGraphServer 发送 gremlin 语句(POST),异步执行 ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/jobs/gremlin +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/gremlin ``` **查询顶点** @@ -209,8 +209,8 @@ POST http://localhost:8080/graphs/hugegraph/jobs/gremlin 注意: -> 异步执行Gremlin语句暂不支持aliases,可以使用 `graph` 代表要操作的图,也可以直接使用图的名字, 例如 `hugegraph`; -另外`g`代表 traversal,等价于 `graph.traversal()` 或者 `hugegraph.traversal()` +> 异步执行 Gremlin 语句暂不支持 aliases,可以使用 `graph` 代表要操作的图,也可以直接使用图的名字,例如 `hugegraph`; +> 另外`g`代表 traversal,等价于 `graph.traversal()` 或者 `hugegraph.traversal()` ##### Response Status @@ -228,7 +228,7 @@ POST http://localhost:8080/graphs/hugegraph/jobs/gremlin 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/1`(其中"1"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) +> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) **查询边** @@ -259,4 +259,4 @@ POST http://localhost:8080/graphs/hugegraph/jobs/gremlin 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/2`(其中"2"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) \ No newline at end of file +> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2`(其中"2"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) diff --git a/content/cn/docs/clients/restful-api/indexlabel.md 
b/content/cn/docs/clients/restful-api/indexlabel.md index 51904be52..efddfbbfb 100644 --- a/content/cn/docs/clients/restful-api/indexlabel.md +++ b/content/cn/docs/clients/restful-api/indexlabel.md @@ -6,14 +6,14 @@ weight: 5 ### 1.5 IndexLabel -假设已经创建好了1.1.3中的 PropertyKeys 、1.2.3中的 VertexLabels 以及 1.3.3中的 EdgeLabels +假设已经创建好了 1.1.3 中的 PropertyKeys、1.2.3 中的 VertexLabels 以及 1.3.3 中的 EdgeLabels -#### 1.5.1 创建一个IndexLabel +#### 1.5.1 创建一个 IndexLabel ##### Method & Url -``` -POST http://localhost:8080/graphs/hugegraph/schema/indexlabels +```bash +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels ``` ##### Request Body @@ -54,12 +54,12 @@ POST http://localhost:8080/graphs/hugegraph/schema/indexlabels } ``` -#### 1.5.2 获取所有的IndexLabel +#### 1.5.2 获取所有的 IndexLabel ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/indexlabels +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels ``` ##### Response Status @@ -118,12 +118,12 @@ GET http://localhost:8080/graphs/hugegraph/schema/indexlabels } ``` -#### 1.5.3 根据name获取IndexLabel +#### 1.5.3 根据 name 获取 IndexLabel ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels/personByCity ``` ##### Response Status @@ -147,14 +147,14 @@ GET http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity } ``` -#### 1.5.4 根据name删除IndexLabel +#### 1.5.4 根据 name 删除 IndexLabel 删除 IndexLabel 会导致删除相关的索引数据,会产生一个异步任务 ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels/personByCity ``` ##### Response Status @@ -173,4 +173,4 @@ DELETE http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity 注: -> 可以通过`GET 
http://localhost:8080/graphs/hugegraph/tasks/1`(其中"1"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) \ No newline at end of file +> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) diff --git a/content/cn/docs/clients/restful-api/propertykey.md b/content/cn/docs/clients/restful-api/propertykey.md index be17d5ec2..0f008f8b8 100644 --- a/content/cn/docs/clients/restful-api/propertykey.md +++ b/content/cn/docs/clients/restful-api/propertykey.md @@ -6,7 +6,7 @@ weight: 2 ### 1.2 PropertyKey -Params说明: +Params 说明: - name:属性类型名称,必填 - data_type:属性类型数据类型,包括:bool、byte、int、long、float、double、text、date、uuid、blob,默认 `text` 类型 (代表 string 字符串类型) @@ -14,17 +14,17 @@ Params说明: 请求体字段说明: -- id:属性类型id值 +- id:属性类型 id 值 - properties:属性的属性,对于属性而言,此项为空 -- user_data:设置属性类型的通用信息,比如可设置age属性的取值范围,最小为0,最大为100;目前此项不做任何校验,只为后期拓展提供预留入口 +- user_data:设置属性类型的通用信息,比如可设置 age 属性的取值范围,最小为 0,最大为 100;目前此项不做任何校验,只为后期拓展提供预留入口 #### 1.2.1 创建一个 PropertyKey ##### Method & Url -``` -POST http://localhost:8080/graphs/hugegraph/schema/propertykeys +```http request +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys ``` ##### Request Body @@ -72,8 +72,8 @@ POST http://localhost:8080/graphs/hugegraph/schema/propertykeys ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/schema/propertykeys/age?action=append +```http request +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys/age?action=append ``` ##### Request Body @@ -122,7 +122,7 @@ PUT http://localhost:8080/graphs/hugegraph/schema/propertykeys/age?action=append ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/propertykeys +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys ``` ##### Response Status @@ -196,12 +196,12 @@ GET http://localhost:8080/graphs/hugegraph/schema/propertykeys } ``` -#### 1.2.4 根据name获取PropertyKey +#### 1.2.4 根据 name 
获取 PropertyKey ##### Method & Url -``` -GET http://localhost:8080/graphs/hugegraph/schema/propertykeys/age +```http request +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys/age ``` 其中,`age`为要获取的 PropertyKey 的名称 @@ -236,8 +236,8 @@ GET http://localhost:8080/graphs/hugegraph/schema/propertykeys/age ##### Method & Url -``` -DELETE http://localhost:8080/graphs/hugegraph/schema/propertykeys/age +```http request +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys/age ``` 其中,`age`为要删除的 PropertyKey 的名称 diff --git a/content/cn/docs/clients/restful-api/rank.md b/content/cn/docs/clients/restful-api/rank.md index 8ef45c7fa..780ed8298 100644 --- a/content/cn/docs/clients/restful-api/rank.md +++ b/content/cn/docs/clients/restful-api/rank.md @@ -13,16 +13,17 @@ HugeGraphServer 除了上一节提到的遍历(traverser)方法,还提供 #### 4.2.1 Personal Rank API -Personal Rank 算法典型场景是用于推荐应用中, 根据某个点现有的出边, 推荐具有相近 / 相同关系的其他点, -比如根据某个人的阅读记录 / 习惯, 向它推荐其他可能感兴趣的书, 或潜在的书友, 举例如下: -1. 假设给定 1个 Person 点 是 tom, 它喜欢 `a,b,c,d,e` 5本书, 我们的想给 tom 推荐一些书友, 以及一些书, 最容易的想法就是看看还有哪些人喜欢过这些书 (共同兴趣) -2. 那么此时, 需要有其它的 Person 点比如 neo, 他喜欢 `b,d,f` 3本书, 以及 jay, 它喜欢 `c,d,e,g` 4本书, lee 它喜欢 `a,d,e,f` 4本书 -3. 由于 tom 已经看过的书不需要重复推荐, 所以返回结果里应该期望推荐有共同喜好的其他书友看过, 但 tom 没看过的书, 比如推荐 "f" 和 "g" 书, 且优先级 f > g -4. 此时再计算 tom 的个性化 rank 值, 就会返回排序后 TopN 推荐的 书友 + 书 的结果了 (如果只需要推荐的书, 选择 OTHER_LABEL 即可) +Personal Rank 算法典型场景是用于推荐应用中,根据某个点现有的出边,推荐具有相近 / 相同关系的其他点, +比如根据某个人的阅读记录 / 习惯,向它推荐其他可能感兴趣的书,或潜在的书友,举例如下: + +1. 假设给定 1 个 Person 点 是 tom, 它喜欢 `a,b,c,d,e` 5 本书,我们的想给 tom 推荐一些书友,以及一些书,最容易的想法就是看看还有哪些人喜欢过这些书 (共同兴趣) +2. 那么此时,需要有其它的 Person 点比如 neo, 他喜欢 `b,d,f` 3 本书,以及 jay, 它喜欢 `c,d,e,g` 4 本书,lee 它喜欢 `a,d,e,f` 4 本书 +3. 由于 tom 已经看过的书不需要重复推荐,所以返回结果里应该期望推荐有共同喜好的其他书友看过,但 tom 没看过的书,比如推荐 "f" 和 "g" 书,且优先级 f > g +4. 
此时再计算 tom 的个性化 rank 值,就会返回排序后 TopN 推荐的 书友 + 书 的结果了 (如果只需要推荐的书,选择 OTHER_LABEL 即可) ##### 4.2.1.0 数据准备 -上面是一个简单的例子, 这里再提供一个公开的 1MB 测试数据集 [MovieLens](https://grouplens.org/datasets/movielens/) 为例, +上面是一个简单的例子,这里再提供一个公开的 1MB 测试数据集 [MovieLens](https://grouplens.org/datasets/movielens/) 为例, 用户需下载该数据集,然后使用 HugeGraph-Loader 导入到 HugeGraph 中,简单起见,数据中顶点 user 和 movie 的属性都忽略,仅使用 id 字段即可,边 rating 的具体评分值也忽略。loader 使用的元数据 文件和输入源映射文件内容如下: @@ -123,10 +124,10 @@ schema.edgeLabel("rating") 假设有一个用户和物品的二分图,基于随机游走的 PersonalRank 算法步骤如下: 1. 选定一个起点用户 u,其初始权重为 1.0,从 Vu 开始游走(有 alpha 的概率走到邻居点,1 - alpha 的概率停留); -2. 如果决定向外游走, 那么会选取某一个类型的出边, 例如 `rating` 来查找共同的打分人: +2. 如果决定向外游走,那么会选取某一个类型的出边,例如 `rating` 来查找共同的打分人: 1. 那就从当前节点的邻居节点中按照均匀分布随机选择一个,并且按照均匀分布划分权重值; 2. 给源顶点补偿权重 1 - alpha; - 3. 重复步骤2; + 3. 重复步骤 2; 3. 达到一定步数或达到精度后收敛,得到推荐列表。 ###### Params @@ -139,12 +140,12 @@ schema.edgeLabel("rating") - alpha:每轮迭代时从某个点往外走的概率,与 PageRank 算法中的 alpha 类似,取值区间为 (0, 1], 默认值 `0.85` - max_degree: 查询过程中,单个顶点遍历的最大邻接边数目,默认为 `10000` - max_depth: 迭代次数,取值区间为 [2, 50], 默认值 `5` -- with_label:筛选结果中保留哪些结果,可选以下三类, 默认为 `BOTH_LABEL` +- with_label:筛选结果中保留哪些结果,可选以下三类,默认为 `BOTH_LABEL` - SAME_LABEL:仅保留与源顶点相同类别的顶点 - OTHER_LABEL:仅保留与源顶点不同类别(二分图的另一端)的顶点 - BOTH_LABEL:同时保留与源顶点相同和相反类别的顶点 - limit: 返回的顶点的最大数目,默认为 `100` -- max_diff: 提前收敛的精度差, 默认为 `0.0001` (*后续实现*) +- max_diff: 提前收敛的精度差,默认为 `0.0001` (*后续实现*) - sorted:返回的结果是否根据 rank 排序,为 true 时降序排列,反之不排序,默认为 `true` ##### 4.2.1.2 使用方法 @@ -152,7 +153,7 @@ schema.edgeLabel("rating") ###### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/traversers/personalrank +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/personalrank ``` ###### Request Body @@ -298,16 +299,16 @@ public class Loader { - steps: 表示从起始顶点走过的路径规则,是一组 Step 的列表,每个 Step 对应结果中的一层,必填项。每个 Step 的结构如下: - direction:表示边的方向(OUT, IN, BOTH),默认是 BOTH - labels:边的类型列表,多个边类型取并集 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) + - 
max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) - top:在结果中每一层只保留权重最高的前 N 个结果,默认为 100,最大值为 1000 -- capacity: 遍历过程中最大的访问的顶点数目,选填项,默认为10000000 +- capacity: 遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 ##### 4.2.2.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/traversers/neighborrank +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/neighborrank ``` ###### Request Body diff --git a/content/cn/docs/clients/restful-api/rebuild.md b/content/cn/docs/clients/restful-api/rebuild.md index eae5d175a..18e037448 100644 --- a/content/cn/docs/clients/restful-api/rebuild.md +++ b/content/cn/docs/clients/restful-api/rebuild.md @@ -6,12 +6,12 @@ weight: 6 ### 1.6 Rebuild -#### 1.6.1 重建IndexLabel +#### 1.6.1 重建 IndexLabel ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/indexlabels/personByCity +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/indexlabels/personByCity ``` ##### Response Status @@ -30,14 +30,14 @@ PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/indexlabels/personByCity 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/1`(其中"1"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) +> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) -#### 1.6.2 VertexLabel对应的全部索引重建 +#### 1.6.2 VertexLabel 对应的全部索引重建 ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/vertexlabels/person +```http request +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/vertexlabels/person ``` ##### Response Status @@ -56,14 +56,14 @@ PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/vertexlabels/person 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/2`(其中"2"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) +> 可以通过`GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2`(其中"2"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) -#### 1.6.3 EdgeLabel对应的全部索引重建 +#### 1.6.3 EdgeLabel 对应的全部索引重建 ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/edgelabels/created +```http request +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/edgelabels/created ``` ##### Response Status @@ -82,4 +82,4 @@ PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/edgelabels/created 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/3`(其中"3"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) \ No newline at end of file +> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/3`(其中"3"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) diff --git a/content/cn/docs/clients/restful-api/schema.md b/content/cn/docs/clients/restful-api/schema.md index 91cdd329a..f0e525b05 100644 --- a/content/cn/docs/clients/restful-api/schema.md +++ b/content/cn/docs/clients/restful-api/schema.md @@ -11,9 +11,9 @@ HugeGraph 提供单一接口获取某个图的全部 Schema 信息,包括:Pr ##### Method & Url ``` -GET http://localhost:8080/graphs/{graph_name}/schema +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph_name}/schema -e.g: GET http://localhost:8080/graphs/hugegraph/schema +e.g: GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema ``` ##### Response Status @@ -319,4 +319,4 @@ e.g: GET http://localhost:8080/graphs/hugegraph/schema } ] } -``` \ No newline at end of file +``` diff --git a/content/cn/docs/clients/restful-api/task.md b/content/cn/docs/clients/restful-api/task.md index 584730e35..b91a5c5a3 100644 --- a/content/cn/docs/clients/restful-api/task.md +++ b/content/cn/docs/clients/restful-api/task.md @@ -16,7 +16,7 @@ weight: 13 ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/tasks?status=success +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks?status=success ``` ##### Response 
Status @@ -50,7 +50,7 @@ GET http://localhost:8080/graphs/hugegraph/tasks?status=success ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/tasks/2 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2 ``` ##### Response Status @@ -82,7 +82,7 @@ GET http://localhost:8080/graphs/hugegraph/tasks/2 ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/tasks/2 +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2 ``` ##### Response Status @@ -110,7 +110,7 @@ DELETE http://localhost:8080/graphs/hugegraph/tasks/2 ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/tasks/2?action=cancel +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2?action=cancel ``` > 请保证在 10 秒内发送该请求,如果超过 10 秒发送,任务可能已经执行完成,无法取消。 diff --git a/content/cn/docs/clients/restful-api/traverser.md b/content/cn/docs/clients/restful-api/traverser.md index 12d7cd3bb..e246ede58 100644 --- a/content/cn/docs/clients/restful-api/traverser.md +++ b/content/cn/docs/clients/restful-api/traverser.md @@ -4,36 +4,36 @@ linkTitle: "Traverser" weight: 9 --- -### 3.1 traverser API概述 +### 3.1 traverser API 概述 -HugeGraphServer为HugeGraph图数据库提供了RESTful API接口。除了顶点和边的CRUD基本操作以外,还提供了一些遍历(traverser)方法,我们称为`traverser API`。这些遍历方法实现了一些复杂的图算法,方便用户对图进行分析和挖掘。 +HugeGraphServer 为 HugeGraph 图数据库提供了 RESTful API 接口。除了顶点和边的 CRUD 基本操作以外,还提供了一些遍历(traverser)方法,我们称为`traverser API`。这些遍历方法实现了一些复杂的图算法,方便用户对图进行分析和挖掘。 -HugeGraph支持的Traverser API包括: +HugeGraph 支持的 Traverser API 包括: -- K-out API,根据起始顶点,查找恰好N步可达的邻居,分为基础版和高级版: - - 基础版使用GET方法,根据起始顶点,查找恰好N步可达的邻居 - - 高级版使用POST方法,根据起始顶点,查找恰好N步可达的邻居,与基础版的不同在于: +- K-out API,根据起始顶点,查找恰好 N 步可达的邻居,分为基础版和高级版: + - 基础版使用 GET 方法,根据起始顶点,查找恰好 N 步可达的邻居 + - 高级版使用 POST 方法,根据起始顶点,查找恰好 N 步可达的邻居,与基础版的不同在于: - 支持只统计邻居数量 - 支持顶点和边属性过滤 - 支持返回到达邻居的最短路径 -- K-neighbor API,根据起始顶点,查找N步以内可达的所有邻居,分为基础版和高级版: - - 基础版使用GET方法,根据起始顶点,查找N步以内可达的所有邻居 - - 高级版使用POST方法,根据起始顶点,查找N步以内可达的所有邻居,与基础版的不同在于: +- K-neighbor API,根据起始顶点,查找 N 
步以内可达的所有邻居,分为基础版和高级版: + - 基础版使用 GET 方法,根据起始顶点,查找 N 步以内可达的所有邻居 + - 高级版使用 POST 方法,根据起始顶点,查找 N 步以内可达的所有邻居,与基础版的不同在于: - 支持只统计邻居数量 - 支持顶点和边属性过滤 - 支持返回到达邻居的最短路径 - Same Neighbors, 查询两个顶点的共同邻居 -- Jaccard Similarity API,计算jaccard相似度,包括两种: - - 一种是使用GET方法,计算两个顶点的邻居的相似度(交并比) - - 一种是使用POST方法,在全图中查找与起点的jaccard similarity最高的N个点 +- Jaccard Similarity API,计算 jaccard 相似度,包括两种: + - 一种是使用 GET 方法,计算两个顶点的邻居的相似度(交并比) + - 一种是使用 POST 方法,在全图中查找与起点的 jaccard similarity 最高的 N 个点 - Shortest Path API,查找两个顶点之间的最短路径 - All Shortest Paths,查找两个顶点间的全部最短路径 - Weighted Shortest Path,查找起点到目标点的带权最短路径 - Single Source Shortest Path,查找一个点到其他各个点的加权最短路径 - Multi Node Shortest Path,查找指定顶点集之间两两最短路径 - Paths API,查找两个顶点间的全部路径,分为基础版和高级版: - - 基础版使用GET方法,根据起点和终点,查找两个顶点间的全部路径 - - 高级版使用POST方法,根据一组起点和一组终点,查找两个集合间符合条件的全部路径 + - 基础版使用 GET 方法,根据起点和终点,查找两个顶点间的全部路径 + - 高级版使用 POST 方法,根据一组起点和一组终点,查找两个集合间符合条件的全部路径 - Customized Paths API,从一批顶点出发,按(一种)模式遍历经过的全部路径 - Template Path API,指定起点和终点以及起点和终点间路径信息,查找符合的路径 - Crosspoints API,查找两个顶点的交点(共同祖先或者共同子孙) @@ -42,19 +42,19 @@ HugeGraph支持的Traverser API包括: - Rays API,从起始顶点出发,可到达边界的路径(即无环路径) - Fusiform Similarity API,查找一个顶点的梭形相似点 - Vertices API - - 按ID批量查询顶点; + - 按 ID 批量查询顶点; - 获取顶点的分区; - 按分区查询顶点; - Edges API - - 按ID批量查询边; + - 按 ID 批量查询边; - 获取边的分区; - 按分区查询边; -### 3.2. traverser API详解 +### 3.2. 
traverser API 详解 -使用方法中的例子,都是基于TinkerPop官网给出的图: +使用方法中的例子,都是基于 TinkerPop 官网给出的图: -![tinkerpop示例图](http://tinkerpop.apache.org/docs/3.4.0/images/tinkerpop-modern.png) +![tinkerpop 示例图](http://tinkerpop.apache.org/docs/3.4.0/images/tinkerpop-modern.png) 数据导入程序如下: @@ -168,7 +168,7 @@ public class Loader { } ``` -顶点ID为: +顶点 ID 为: ``` "2:ripple", @@ -179,7 +179,7 @@ public class Loader { "2:lop" ``` -边ID为: +边 ID 为: ``` "S1:peter>2>>S2:lop", @@ -194,25 +194,25 @@ public class Loader { ##### 3.2.1.1 功能介绍 -根据起始顶点、方向、边的类型(可选)和深度depth,查找从起始顶点出发恰好depth步可达的顶点 +根据起始顶点、方向、边的类型(可选)和深度 depth,查找从起始顶点出发恰好 depth 步可达的顶点 ###### Params -- source:起始顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH +- source:起始顶点 id,必填项 +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH - max_depth:步数,必填项 -- label:边的类型,选填项,默认代表所有edge label -- nearest:nearest为true时,代表起始顶点到达结果顶点的最短路径长度为depth,不存在更短的路径;nearest为false时,代表起始顶点到结果顶点有一条长度为depth的路径(未必最短且可以有环),选填项,默认为true -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的顶点的最大数目,选填项,默认为10000000 +- label:边的类型,选填项,默认代表所有 edge label +- nearest:nearest 为 true 时,代表起始顶点到达结果顶点的最短路径长度为 depth,不存在更短的路径;nearest 为 false 时,代表起始顶点到结果顶点有一条长度为 depth 的路径(未必最短且可以有环),选填项,默认为 true +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的顶点的最大数目,选填项,默认为 10000000 ##### 3.2.1.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/kout?source="1:marko"&max_depth=2 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/kout?source="1:marko"&max_depth=2 ``` ###### Response Status @@ -234,51 +234,51 @@ GET http://localhost:8080/graphs/{graph}/traversers/kout?source="1:marko"&max_de ##### 3.2.1.3 适用场景 -查找恰好N步关系可达的顶点。两个例子: +查找恰好 N 步关系可达的顶点。两个例子: -- 家族关系中,查找一个人的所有孙子,person A通过连续的两条“儿子”边到达的顶点集合。 +- 家族关系中,查找一个人的所有孙子,person A 通过连续的两条“儿子”边到达的顶点集合。 - 社交关系中发现潜在好友,例如:与目标用户相隔两层朋友关系的用户,可以通过连续两条“朋友”边到达的顶点。 #### 3.2.2 K-out API(POST,高级版) ##### 
3.2.2.1 功能介绍 -根据起始顶点、步骤(包括方向、边类型和过滤属性)和深度depth,查找从起始顶点出发恰好depth步可达的顶点。 +根据起始顶点、步骤(包括方向、边类型和过滤属性)和深度 depth,查找从起始顶点出发恰好 depth 步可达的顶点。 -> 与K-out基础版的不同在于: +> 与 K-out 基础版的不同在于: > - 支持只统计邻居数量 > - 支持边属性过滤 > - 支持返回到达邻居的最短路径 ###### Params -- source:起始顶点id,必填项 -- steps: 从起始点出发的Steps,必填项,结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH - - edge_steps:边Step集合,支持对单边的类型和属性过滤,如果为空,则不过滤 +- source:起始顶点 id,必填项 +- steps: 从起始点出发的 Steps,必填项,结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH + - edge_steps:边 Step 集合,支持对单边的类型和属性过滤,如果为空,则不过滤 - label:边类型 - properties:边属性 - - vertex_steps:顶点Step集合,支持对单点的类型和属性过滤,如果为空,则不过滤 + - vertex_steps:顶点 Step 集合,支持对单点的类型和属性过滤,如果为空,则不过滤 - label:顶点类型 - properties:顶点属性 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) - max_depth:步数,必填项 -- nearest:nearest为true时,代表起始顶点到达结果顶点的最短路径长度为depth,不存在更短的路径;nearest为false时,代表起始顶点到结果顶点有一条长度为depth的路径(未必最短且可以有环),选填项,默认为true -- count_only:Boolean值,true表示只统计结果的数目,不返回具体结果;false表示返回具体的结果,默认为false -- with_path:true表示返回起始点到每个邻居的最短路径,false表示不返回起始点到每个邻居的最短路径,选填项,默认为false -- with_edge,选填项,默认为false: - - 如果设置为true,则结果将包含所有边的完整信息,即路径中的所有边 - - 当with_path为true时,将返回所有路径中的边的完整信息 - - 当with_path为false时,不返回任何信息 - - 如果设置为false,则仅返回边的id -- with_vertex,选填项,默认为false: - - 如果设置为true,则结果将包含所有顶点的完整信息,即路径中的所有顶点 - - 当with_path为true时,将返回所有路径中的顶点的完整信息 - - 当with_path为false时,返回所有邻居顶点的完整信息 - - 
如果设置为false,则仅返回顶点的id -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的顶点的最大数目,选填项,默认为10000000 +- nearest:nearest 为 true 时,代表起始顶点到达结果顶点的最短路径长度为 depth,不存在更短的路径;nearest 为 false 时,代表起始顶点到结果顶点有一条长度为 depth 的路径(未必最短且可以有环),选填项,默认为 true +- count_only:Boolean 值,true 表示只统计结果的数目,不返回具体结果;false 表示返回具体的结果,默认为 false +- with_path:true 表示返回起始点到每个邻居的最短路径,false 表示不返回起始点到每个邻居的最短路径,选填项,默认为 false +- with_edge,选填项,默认为 false: + - 如果设置为 true,则结果将包含所有边的完整信息,即路径中的所有边 + - 当 with_path 为 true 时,将返回所有路径中的边的完整信息 + - 当 with_path 为 false 时,不返回任何信息 + - 如果设置为 false,则仅返回边的 id +- with_vertex,选填项,默认为 false: + - 如果设置为 true,则结果将包含所有顶点的完整信息,即路径中的所有顶点 + - 当 with_path 为 true 时,将返回所有路径中的顶点的完整信息 + - 当 with_path 为 false 时,返回所有邻居顶点的完整信息 + - 如果设置为 false,则仅返回顶点的 id +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的顶点的最大数目,选填项,默认为 10000000 - traverse_mode: 遍历方式,可选择“breadth_first_search”或“depth_first_search”作为参数,默认为“breadth_first_search” ##### 3.2.2.2 使用方法 @@ -286,7 +286,7 @@ GET http://localhost:8080/graphs/{graph}/traversers/kout?source="1:marko"&max_de ###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/kout +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/kout ``` ###### Request Body @@ -429,31 +429,31 @@ POST http://localhost:8080/graphs/{graph}/traversers/kout ##### 3.2.2.3 适用场景 -参见3.2.1.3 +参见 3.2.1.3 #### 3.2.3 K-neighbor(GET,基础版) ##### 3.2.3.1 功能介绍 -根据起始顶点、方向、边的类型(可选)和深度depth,查找包括起始顶点在内、depth步之内可达的所有顶点 +根据起始顶点、方向、边的类型(可选)和深度 depth,查找包括起始顶点在内、depth 步之内可达的所有顶点 -> 相当于:起始顶点、K-out(1)、K-out(2)、... 、K-out(max_depth)的并集 +> 相当于:起始顶点、K-out(1)、K-out(2)、... 
、K-out(max_depth) 的并集 ###### Params -- source: 起始顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH +- source: 起始顶点 id,必填项 +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH - max_depth:步数,必填项 -- label:边的类型,选填项,默认代表所有edge label -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- limit:返回的顶点的最大数目,也即遍历过程中最大的访问的顶点数目,选填项,默认为10000000 +- label:边的类型,选填项,默认代表所有 edge label +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- limit:返回的顶点的最大数目,也即遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 ##### 3.2.3.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/kneighbor?source=“1:marko”&max_depth=2 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/kneighbor?source=“1:marko”&max_depth=2 ``` ###### Response Status @@ -479,59 +479,59 @@ GET http://localhost:8080/graphs/{graph}/traversers/kneighbor?source=“1:marko ##### 3.2.3.3 适用场景 -查找N步以内可达的所有顶点,例如: +查找 N 步以内可达的所有顶点,例如: -- 家族关系中,查找一个人五服以内所有子孙,person A通过连续的5条“亲子”边到达的顶点集合。 -- 社交关系中发现好友圈子,例如目标用户通过1条、2条、3条“朋友”边可到达的用户可以组成目标用户的朋友圈子 +- 家族关系中,查找一个人五服以内所有子孙,person A 通过连续的 5 条“亲子”边到达的顶点集合。 +- 社交关系中发现好友圈子,例如目标用户通过 1 条、2 条、3 条“朋友”边可到达的用户可以组成目标用户的朋友圈子 #### 3.2.4 K-neighbor API(POST,高级版) ##### 3.2.4.1 功能介绍 -根据起始顶点、步骤(包括方向、边类型和过滤属性)和深度depth,查找从起始顶点出发depth步内可达的所有顶点。 +根据起始顶点、步骤(包括方向、边类型和过滤属性)和深度 depth,查找从起始顶点出发 depth 步内可达的所有顶点。 -> 与K-neighbor基础版的不同在于: +> 与 K-neighbor 基础版的不同在于: > - 支持只统计邻居数量 > - 支持边属性过滤 > - 支持返回到达邻居的最短路径 ###### Params -- source:起始顶点id,必填项 -- steps: 从起始点出发的Steps,必填项,结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH - - 从起始点出发的Steps,必填项,结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH - - edge_steps:边Step集合,支持对单边的类型和属性过滤,如果为空,则不过滤 +- source:起始顶点 id,必填项 +- steps: 从起始点出发的 Steps,必填项,结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH + - 从起始点出发的 Steps,必填项,结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH + - edge_steps:边 Step 集合,支持对单边的类型和属性过滤,如果为空,则不过滤 - label:边类型 - properties:边属性 - - vertex_steps:顶点Step集合,支持对单点的类型和属性过滤,如果为空,则不过滤 + - vertex_steps:顶点 Step 集合,支持对单点的类型和属性过滤,如果为空,则不过滤 - label:顶点类型 - 
properties:顶点属性 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) - max_depth:步数,必填项 -- count_only:Boolean值,true表示只统计结果的数目,不返回具体结果;false表示返回具体的结果,默认为false -- with_path:true表示返回起始点到每个邻居的最短路径,false表示不返回起始点到每个邻居的最短路径,选填项,默认为false -- with_edge,选填项,默认为false: - - 如果设置为true,则结果将包含所有边的完整信息,即路径中的所有边 - - 当with_path为true时,将返回所有路径中的边的完整信息 - - 当with_path为false时,不返回任何信息 - - 如果设置为false,则仅返回边的id -- with_vertex,选填项,默认为false: - - 如果设置为true,则结果将包含所有顶点的完整信息,即路径中的所有顶点 - - 当with_path为true时,将返回所有路径中的顶点的完整信息 - - 当with_path为false时,返回所有邻居顶点的完整信息 - - 如果设置为false,则仅返回顶点的id -- limit:返回的顶点的最大数目,选填项,默认为10000000 +- count_only:Boolean 值,true 表示只统计结果的数目,不返回具体结果;false 表示返回具体的结果,默认为 false +- with_path:true 表示返回起始点到每个邻居的最短路径,false 表示不返回起始点到每个邻居的最短路径,选填项,默认为 false +- with_edge,选填项,默认为 false: + - 如果设置为 true,则结果将包含所有边的完整信息,即路径中的所有边 + - 当 with_path 为 true 时,将返回所有路径中的边的完整信息 + - 当 with_path 为 false 时,不返回任何信息 + - 如果设置为 false,则仅返回边的 id +- with_vertex,选填项,默认为 false: + - 如果设置为 true,则结果将包含所有顶点的完整信息,即路径中的所有顶点 + - 当 with_path 为 true 时,将返回所有路径中的顶点的完整信息 + - 当 with_path 为 false 时,返回所有邻居顶点的完整信息 + - 如果设置为 false,则仅返回顶点的 id +- limit:返回的顶点的最大数目,选填项,默认为 10000000 ##### 3.2.4.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/kneighbor +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/kneighbor ``` ###### Request 
Body @@ -730,7 +730,7 @@ POST http://localhost:8080/graphs/{graph}/traversers/kneighbor ##### 3.2.4.3 适用场景 -参见3.2.3.3 +参见 3.2.3.3 #### 3.2.5 Same Neighbors @@ -740,19 +740,19 @@ POST http://localhost:8080/graphs/{graph}/traversers/kneighbor ###### Params -- vertex:一个顶点id,必填项 -- other:另一个顶点id,必填项 -- direction:顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- limit:返回的共同邻居的最大数目,选填项,默认为10000000 +- vertex:一个顶点 id,必填项 +- other:另一个顶点 id,必填项 +- direction:顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- limit:返回的共同邻居的最大数目,选填项,默认为 10000000 ##### 3.2.5.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/sameneighbors?vertex=“1:marko”&other="1:josh" +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/sameneighbors?vertex=“1:marko”&other="1:josh" ``` ###### Response Status @@ -777,26 +777,26 @@ GET http://localhost:8080/graphs/{graph}/traversers/sameneighbors?vertex=“1:ma - 社交关系中发现两个用户的共同粉丝或者共同关注用户 -#### 3.2.6 Jaccard Similarity(GET) +#### 3.2.6 Jaccard Similarity (GET) ##### 3.2.6.1 功能介绍 -计算两个顶点的jaccard similarity(两个顶点邻居的交集比上两个顶点邻居的并集) +计算两个顶点的 jaccard similarity(两个顶点邻居的交集比上两个顶点邻居的并集) ###### Params -- vertex:一个顶点id,必填项 -- other:另一个顶点id,必填项 -- direction:顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 +- vertex:一个顶点 id,必填项 +- other:另一个顶点 id,必填项 +- direction:顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 ##### 3.2.6.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/jaccardsimilarity?vertex="1:marko"&other="1:josh" +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/jaccardsimilarity?vertex="1:marko"&other="1:josh" ``` ###### Response Status @@ -817,32 +817,32 @@ GET 
http://localhost:8080/graphs/{graph}/traversers/jaccardsimilarity?vertex="1: 用于评估两个点的相似性或者紧密度 -#### 3.2.7 Jaccard Similarity(POST) +#### 3.2.7 Jaccard Similarity (POST) ##### 3.2.7.1 功能介绍 -计算与指定顶点的jaccard similarity最大的N个点 +计算与指定顶点的 jaccard similarity 最大的 N 个点 -> jaccard similarity的计算方式为:两个顶点邻居的交集比上两个顶点邻居的并集 +> jaccard similarity 的计算方式为:两个顶点邻居的交集比上两个顶点邻居的并集 ###### Params -- vertex:一个顶点id,必填项 -- 从起始点出发的Step,必填项,结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH +- vertex:一个顶点 id,必填项 +- 从起始点出发的 Step,必填项,结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH - labels:边的类型列表 - properties:通过属性的值过滤边 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- top:返回一个起点的jaccard similarity中最大的top个,选填项,默认为100 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- top:返回一个起点的 jaccard similarity 中最大的 top 个,选填项,默认为 100 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 ##### 3.2.7.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/jaccardsimilarity +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/jaccardsimilarity ``` ###### Request Body @@ -888,21 +888,21 @@ POST http://localhost:8080/graphs/{graph}/traversers/jaccardsimilarity ###### Params -- source:起始顶点id,必填项 -- target:目的顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH +- source:起始顶点 id,必填项 +- target:目的顶点 id,必填项 +- 
direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH - max_depth:最大步数,必填项 -- label:边的类型,选填项,默认代表所有edge label -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 +- label:边的类型,选填项,默认代表所有 edge label +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 ##### 3.2.8.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/shortestpath?source="1:marko"&target="2:ripple"&max_depth=3 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/shortestpath?source="1:marko"&target="2:ripple"&max_depth=3 ``` ###### Response Status @@ -938,21 +938,21 @@ GET http://localhost:8080/graphs/{graph}/traversers/shortestpath?source="1:marko ###### Params -- source:起始顶点id,必填项 -- target:目的顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH +- source:起始顶点 id,必填项 +- target:目的顶点 id,必填项 +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH - max_depth:最大步数,必填项 -- label:边的类型,选填项,默认代表所有edge label -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 +- label:边的类型,选填项,默认代表所有 edge label +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= 
max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 ##### 3.2.9.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/allshortestpaths?source="A"&target="Z"&max_depth=10 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/allshortestpaths?source="A"&target="Z"&max_depth=10 ``` ###### Response Status @@ -1001,22 +1001,22 @@ GET http://localhost:8080/graphs/{graph}/traversers/allshortestpaths?source="A"& ###### Params -- source:起始顶点id,必填项 -- target:目的顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label +- source:起始顶点 id,必填项 +- target:目的顶点 id,必填项 +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label - weight:边的权重属性,必填项,必须是数字类型的属性 -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- with_vertex:true表示返回结果包含完整的顶点信息(路径中的全部顶点),false时表示只返回顶点id,选填项,默认为false +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- with_vertex:true 表示返回结果包含完整的顶点信息(路径中的全部顶点),false 时表示只返回顶点 id,选填项,默认为 false ##### 3.2.10.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/weightedshortestpath?source="1:marko"&target="2:ripple"&weight="weight"&with_vertex=true +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/weightedshortestpath?source="1:marko"&target="2:ripple"&weight="weight"&with_vertex=true ``` ###### Response Status @@ -1076,7 +1076,7 @@ GET http://localhost:8080/graphs/{graph}/traversers/weightedshortestpath?source= 查找两个顶点间的带权最短路径,例如: -- 交通线路中查找从A城市到B城市花钱最少的交通方式 +- 交通线路中查找从 A 城市到 B 城市花钱最少的交通方式 #### 3.2.11 Single Source Shortest Path @@ -1086,22 +1086,22 @@ GET http://localhost:8080/graphs/{graph}/traversers/weightedshortestpath?source= ###### Params -- source:起始顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label -- weight:边的权重属性,选填项,必须是数字类型的属性,如果不填或者虽然填了但是边没有该属性,则权重为1.0 -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:查询到的目标顶点个数,也是返回的最短路径的条数,选填项,默认为10 -- with_vertex:true表示返回结果包含完整的顶点信息(路径中的全部顶点),false时表示只返回顶点id,选填项,默认为false +- source:起始顶点 id,必填项 +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label +- weight:边的权重属性,选填项,必须是数字类型的属性,如果不填或者虽然填了但是边没有该属性,则权重为 1.0 +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:查询到的目标顶点个数,也是返回的最短路径的条数,选填项,默认为 10 +- with_vertex:true 表示返回结果包含完整的顶点信息(路径中的全部顶点),false 时表示只返回顶点 id,选填项,默认为 false ##### 3.2.11.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/singlesourceshortestpath?source="1:marko"&with_vertex=true +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/singlesourceshortestpath?source="1:marko"&with_vertex=true ``` ###### Response Status @@ -1233,27 +1233,27 @@ GET http://localhost:8080/graphs/{graph}/traversers/singlesourceshortestpath?sou ###### Params - vertices:定义起始顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供起始顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询起始顶点 + - ids:通过顶点 id 列表提供起始顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询起始顶点 - label:顶点的类型 - properties:通过属性的值查询起始顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 -- step:表示从起始顶点到终止顶点走过的路径,必填项,Step的结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 +- step:表示从起始顶点到终止顶点走过的路径,必填项,Step 的结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH - labels:边的类型列表 - properties:通过属性的值过滤边 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) - max_depth:步数,必填项 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- with_vertex:true表示返回结果包含完整的顶点信息(路径中的全部顶点),false时表示只返回顶点id,选填项,默认为false +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- with_vertex:true 表示返回结果包含完整的顶点信息(路径中的全部顶点),false 时表示只返回顶点 id,选填项,默认为 false ##### 3.2.12.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/multinodeshortestpath +POST 
http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/multinodeshortestpath ``` ###### Request Body @@ -1459,7 +1459,7 @@ POST http://localhost:8080/graphs/{graph}/traversers/multinodeshortestpath - 查找多个公司和法人之间的最短路径 -#### 3.2.13 Paths (GET,基础版) +#### 3.2.13 Paths(GET,基础版) ##### 3.2.13.1 功能介绍 @@ -1467,21 +1467,21 @@ POST http://localhost:8080/graphs/{graph}/traversers/multinodeshortestpath ###### Params -- source:起始顶点id,必填项 -- target:目的顶点id,必填项 -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label +- source:起始顶点 id,必填项 +- target:目的顶点 id,必填项 +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label - max_depth:步数,必填项 -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的路径的最大数目,选填项,默认为10 +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的路径的最大数目,选填项,默认为 10 ##### 3.2.13.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/paths?source="1:marko"&target="1:josh"&max_depth=5 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/paths?source="1:marko"&target="1:josh"&max_depth=5 ``` ###### Response Status @@ -1519,7 +1519,7 @@ GET http://localhost:8080/graphs/{graph}/traversers/paths?source="1:marko"&targe - 社交网络中,查找两个用户所有可能的关系路径 - 设备关联网络中,查找两个设备之间所有的关联路径 -#### 3.2.14 Paths (POST,高级版) +#### 3.2.14 Paths(POST,高级版) ##### 3.2.14.1 功能介绍 @@ -1528,28 +1528,28 @@ GET http://localhost:8080/graphs/{graph}/traversers/paths?source="1:marko"&targe ###### Params - sources:定义起始顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供起始顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询起始顶点 + - ids:通过顶点 id 列表提供起始顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询起始顶点 - label:顶点的类型 - properties:通过属性的值查询起始顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 - targets:定义终止顶点,必填项,指定方式包括: - - 
ids:通过顶点id列表提供终止顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询终止顶点 + - ids:通过顶点 id 列表提供终止顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询终止顶点 - label:顶点的类型 - properties:通过属性的值查询终止顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 -- step:表示从起始顶点到终止顶点走过的路径,必填项,Step的结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 +- step:表示从起始顶点到终止顶点走过的路径,必填项,Step 的结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH - labels:边的类型列表 - properties:通过属性的值过滤边 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) - max_depth:步数,必填项 -- nearest:nearest为true时,代表起始顶点到达结果顶点的最短路径长度为depth,不存在更短的路径;nearest为false时,代表起始顶点到结果顶点有一条长度为depth的路径(未必最短且可以有环),选填项,默认为true -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的路径的最大数目,选填项,默认为10 -- with_vertex:true表示返回结果包含完整的顶点信息(路径中的全部顶点),false时表示只返回顶点id,选填项,默认为false +- nearest:nearest 为 true 时,代表起始顶点到达结果顶点的最短路径长度为 depth,不存在更短的路径;nearest 为 false 时,代表起始顶点到结果顶点有一条长度为 depth 的路径(未必最短且可以有环),选填项,默认为 true +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的路径的最大数目,选填项,默认为 10 +- with_vertex:true 表示返回结果包含完整的顶点信息(路径中的全部顶点),false 时表示只返回顶点 id,选填项,默认为 false ##### 3.2.14.2 使用方法 @@ -1557,7 +1557,7 @@ GET http://localhost:8080/graphs/{graph}/traversers/paths?source="1:marko"&targe ###### Method & Url ``` -POST 
http://localhost:8080/graphs/{graph}/traversers/paths +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/paths ``` ###### Request Body @@ -1630,33 +1630,33 @@ POST http://localhost:8080/graphs/{graph}/traversers/paths ###### Params - sources:定义起始顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供起始顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询起始顶点 + - ids:通过顶点 id 列表提供起始顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询起始顶点 - label:顶点的类型 - properties:通过属性的值查询起始顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 -- steps:表示从起始顶点走过的路径规则,是一组Step的列表。必填项。每个Step的结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 +- steps:表示从起始顶点走过的路径规则,是一组 Step 的列表。必填项。每个 Step 的结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH - labels:边的类型列表 - properties:通过属性的值过滤边 - - weight_by:根据指定的属性计算边的权重,sort_by不为NONE时有效,与default_weight互斥 - - default_weight:当边没有属性作为权重计算值时,采取的默认权重,sort_by不为NONE时有效,与weight_by互斥 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - sample:当需要对某个step的符合条件的边进行采样时设置,-1表示不采样,默认为采样100 -- sort_by:根据路径的权重排序,选填项,默认为NONE: - - NONE表示不排序,默认值 - - INCR表示按照路径权重的升序排序 - - DECR表示按照路径权重的降序排序 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的路径的最大数目,选填项,默认为10 -- with_vertex:true表示返回结果包含完整的顶点信息(路径中的全部顶点),false时表示只返回顶点id,选填项,默认为false + - weight_by:根据指定的属性计算边的权重,sort_by 不为 NONE 时有效,与 default_weight 互斥 + - default_weight:当边没有属性作为权重计算值时,采取的默认权重,sort_by 不为 NONE 时有效,与 weight_by 互斥 + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - sample:当需要对某个 step 的符合条件的边进行采样时设置,-1 表示不采样,默认为采样 100 +- sort_by:根据路径的权重排序,选填项,默认为 NONE: + - NONE 表示不排序,默认值 + - INCR 表示按照路径权重的升序排序 + - DECR 表示按照路径权重的降序排序 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的路径的最大数目,选填项,默认为 10 +- with_vertex:true 表示返回结果包含完整的顶点信息(路径中的全部顶点),false 时表示只返回顶点 id,选填项,默认为 false ##### 3.2.15.2 使用方法 
###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/customizedpaths +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/customizedpaths ``` ###### Request Body @@ -1805,7 +1805,7 @@ POST http://localhost:8080/graphs/{graph}/traversers/customizedpaths 适合查找各种复杂的路径集合,例如: -- 社交网络中,查找看过张艺谋所导演的电影的用户关注的大V的路径(张艺谋--->电影---->用户--->大V) +- 社交网络中,查找看过张艺谋所导演的电影的用户关注的大 V 的路径(张艺谋--->电影---->用户--->大 V) - 风控网络中,查找多个高风险用户的直系亲属的朋友的路径(高风险用户--->直系亲属--->朋友) #### 3.2.16 Template Paths @@ -1817,35 +1817,35 @@ POST http://localhost:8080/graphs/{graph}/traversers/customizedpaths ###### Params - sources:定义起始顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供起始顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询起始顶点 + - ids:通过顶点 id 列表提供起始顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询起始顶点 - label:顶点的类型 - properties:通过属性的值查询起始顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 - targets:定义终止顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供终止顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询终止顶点 + - ids:通过顶点 id 列表提供终止顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询终止顶点 - label:顶点的类型 - properties:通过属性的值查询终止顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 -- steps:表示从起始顶点走过的路径规则,是一组Step的列表。必填项。每个Step的结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 +- steps:表示从起始顶点走过的路径规则,是一组 Step 的列表。必填项。每个 Step 的结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH - labels:边的类型列表 - properties:通过属性的值过滤边 - - max_times:当前step可以重复的次数,当为N时,表示从起始顶点可以经过当前step 1-N 次 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- 
with_ring:Boolean值,true表示包含环路;false表示不包含环路,默认为false -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的路径的最大数目,选填项,默认为10 -- with_vertex:true表示返回结果包含完整的顶点信息(路径中的全部顶点),false时表示只返回顶点id,选填项,默认为false + - max_times:当前 step 可以重复的次数,当为 N 时,表示从起始顶点可以经过当前 step 1-N 次 + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- with_ring:Boolean 值,true 表示包含环路;false 表示不包含环路,默认为 false +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的路径的最大数目,选填项,默认为 10 +- with_vertex:true 表示返回结果包含完整的顶点信息(路径中的全部顶点),false 时表示只返回顶点 id,选填项,默认为 false ##### 3.2.16.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/templatepaths +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/templatepaths ``` ###### Request Body @@ -1985,7 +1985,7 @@ POST http://localhost:8080/graphs/{graph}/traversers/templatepaths ##### 3.2.16.3 适用场景 -适合查找各种复杂的模板路径,比如personA -(朋友)-> personB -(同学)-> personC,其中"朋友"和"同学"边可以分别是最多3层和4层的情况 +适合查找各种复杂的模板路径,比如 personA -(朋友)-> personB -(同学)-> personC,其中"朋友"和"同学"边可以分别是最多 3 层和 4 层的情况 #### 3.2.17 Crosspoints @@ -1995,21 +1995,21 @@ POST http://localhost:8080/graphs/{graph}/traversers/templatepaths ###### Params -- source:起始顶点id,必填项 -- target:目的顶点id,必填项 -- direction:起始顶点到目的顶点的方向, 目的点到起始点是反方向,BOTH时不考虑方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label +- source:起始顶点 id,必填项 +- target:目的顶点 id,必填项 +- direction:起始顶点到目的顶点的方向,目的点到起始点是反方向,BOTH 时不考虑方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label - max_depth:步数,必填项 -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的交点的最大数目,选填项,默认为10 +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- 
capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的交点的最大数目,选填项,默认为 10 ##### 3.2.17.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/crosspoints?source="2:lop"&target="2:ripple"&max_depth=5&direction=IN +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/crosspoints?source="2:lop"&target="2:ripple"&max_depth=5&direction=IN ``` ###### Response Status @@ -2039,7 +2039,7 @@ GET http://localhost:8080/graphs/{graph}/traversers/crosspoints?source="2:lop"&t 查找两个顶点的交点及其路径,例如: -- 社交网络中,查找两个用户共同关注的话题或者大V +- 社交网络中,查找两个用户共同关注的话题或者大 V - 家族关系中,查找共同的祖先 #### 3.2.18 Customized Crosspoints @@ -2051,34 +2051,34 @@ GET http://localhost:8080/graphs/{graph}/traversers/crosspoints?source="2:lop"&t ###### Params - sources:定义起始顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供起始顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询起始顶点 + - ids:通过顶点 id 列表提供起始顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询起始顶点 - label:顶点的类型 - properties:通过属性的值查询起始顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 -- path_patterns:表示从起始顶点走过的路径规则,是一组规则的列表。必填项。每个规则是一个PathPattern - - 每个PathPattern是一组Step列表,每个Step结构如下: - - direction:表示边的方向(OUT,IN,BOTH),默认是BOTH +- path_patterns:表示从起始顶点走过的路径规则,是一组规则的列表。必填项。每个规则是一个 PathPattern + - 每个 PathPattern 是一组 Step 列表,每个 Step 结构如下: + - direction:表示边的方向(OUT,IN,BOTH),默认是 BOTH - labels:边的类型列表 - properties:通过属性的值过滤边 - - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注: 0.12版之前 step 内仅支持 degree 作为参数名, 0.12开始统一使用 max_degree, 并向下兼容 degree 写法) - - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为0 (不启用),表示不跳过任何点 (注意: 开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的路径的最大数目,选填项,默认为10 -- with_path:true表示返回交点所在的路径,false表示不返回交点所在的路径,选填项,默认为false -- with_vertex,选填项,默认为false: - - 
true表示返回结果包含完整的顶点信息(路径中的全部顶点) - - with_path为true时,返回所有路径中的顶点的完整信息 - - with_path为false时,返回所有交点的完整信息 - - false时表示只返回顶点id + - max_degree:查询过程中,单个顶点遍历的最大邻接边数目,默认为 10000 (注:0.12 版之前 step 内仅支持 degree 作为参数名,0.12 开始统一使用 max_degree, 并向下兼容 degree 写法) + - skip_degree:用于设置查询过程中舍弃超级顶点的最小边数,即当某个顶点的邻接边数目大于 skip_degree 时,完全舍弃该顶点。选填项,如果开启时,需满足 `skip_degree >= max_degree` 约束,默认为 0 (不启用),表示不跳过任何点 (注意:开启此配置后,遍历时会尝试访问一个顶点的 skip_degree 条边,而不仅仅是 max_degree 条边,这样有额外的遍历开销,对查询性能影响可能有较大影响,请确认理解后再开启) +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的路径的最大数目,选填项,默认为 10 +- with_path:true 表示返回交点所在的路径,false 表示不返回交点所在的路径,选填项,默认为 false +- with_vertex,选填项,默认为 false: + - true 表示返回结果包含完整的顶点信息(路径中的全部顶点) + - with_path 为 true 时,返回所有路径中的顶点的完整信息 + - with_path 为 false 时,返回所有交点的完整信息 + - false 时表示只返回顶点 id ##### 3.2.18.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/{graph}/traversers/customizedcrosspoints +POST http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/customizedcrosspoints ``` ###### Request Body @@ -2234,21 +2234,21 @@ POST http://localhost:8080/graphs/{graph}/traversers/customizedcrosspoints ###### Params -- source:起始顶点id,必填项 -- direction:起始顶点发出的边的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label +- source:起始顶点 id,必填项 +- direction:起始顶点发出的边的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label - max_depth:步数,必填项 -- source_in_ring:环路是否包含起点,选填项,默认为true -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的可达环路的最大数目,选填项,默认为10 +- source_in_ring:环路是否包含起点,选填项,默认为 true +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的可达环路的最大数目,选填项,默认为 10 ##### 3.2.19.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/rings?source="1:marko"&max_depth=2 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/rings?source="1:marko"&max_depth=2 ``` ###### Response Status @@ -2304,20 +2304,20 @@ GET 
http://localhost:8080/graphs/{graph}/traversers/rings?source="1:marko"&max_d ###### Params -- source:起始顶点id,必填项 -- direction:起始顶点发出的边的方向(OUT,IN,BOTH),选填项,默认是BOTH -- label:边的类型,选填项,默认代表所有edge label +- source:起始顶点 id,必填项 +- direction:起始顶点发出的边的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- label:边的类型,选填项,默认代表所有 edge label - max_depth:步数,必填项 -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的非环路的最大数目,选填项,默认为10 +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的非环路的最大数目,选填项,默认为 10 ##### 3.2.20.2 使用方法 ###### Method & Url ``` -GET http://localhost:8080/graphs/{graph}/traversers/rays?source="1:marko"&max_depth=2&direction=OUT +GET http://localhost:8080/graphspaces/DEFAULT/graphs/{graph}/traversers/rays?source="1:marko"&max_depth=2&direction=OUT ``` ###### Response Status @@ -2372,39 +2372,39 @@ GET http://localhost:8080/graphs/{graph}/traversers/rays?source="1:marko"&max_de ##### 3.2.21.1 功能介绍 -按照条件查询一批顶点对应的"梭形相似点"。当两个顶点跟很多共同的顶点之间有某种关系的时候,我们认为这两个点为"梭形相似点"。举个例子说明"梭形相似点":"读者A"读了100本书,可以定义读过这100本书中的80本以上的读者,是"读者A"的"梭形相似点" +按照条件查询一批顶点对应的"梭形相似点"。当两个顶点跟很多共同的顶点之间有某种关系的时候,我们认为这两个点为"梭形相似点"。举个例子说明"梭形相似点":"读者 A"读了 100 本书,可以定义读过这 100 本书中的 80 本以上的读者,是"读者 A"的"梭形相似点" ###### Params - sources:定义起始顶点,必填项,指定方式包括: - - ids:通过顶点id列表提供起始顶点 - - label和properties:如果没有指定ids,则使用label和properties的联合条件查询起始顶点 + - ids:通过顶点 id 列表提供起始顶点 + - label 和 properties:如果没有指定 ids,则使用 label 和 properties 的联合条件查询起始顶点 - label:顶点的类型 - properties:通过属性的值查询起始顶点 - > 注意:properties中的属性值可以是列表,表示只要key对应的value在列表中就可以 + > 注意:properties 中的属性值可以是列表,表示只要 key 对应的 value 在列表中就可以 -- label:边的类型,选填项,默认代表所有edge label -- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是BOTH -- min_neighbors:最少邻居数目,邻居数目少于这个阈值时,认为起点不具备"梭形相似点"。比如想要找一个"读者A"读过的书的"梭形相似点",那么`min_neighbors`为100时,表示"读者A"至少要读过100本书才可以有"梭形相似点",必填项 +- label:边的类型,选填项,默认代表所有 edge label +- direction:起始顶点向外发散的方向(OUT,IN,BOTH),选填项,默认是 BOTH +- min_neighbors:最少邻居数目,邻居数目少于这个阈值时,认为起点不具备"梭形相似点"。比如想要找一个"读者 
A"读过的书的"梭形相似点",那么`min_neighbors`为 100 时,表示"读者 A"至少要读过 100 本书才可以有"梭形相似点",必填项 - alpha:相似度,代表:起点与"梭形相似点"的共同邻居数目占起点的全部邻居数目的比例,必填项 -- min_similars:"梭形相似点"的最少个数,只有当起点的"梭形相似点"数目大于或等于该值时,才会返回起点及其"梭形相似点",选填项,默认值为1 -- top:返回一个起点的"梭形相似点"中相似度最高的top个,必填项,0表示全部 -- group_property:与`min_groups`一起使用,当起点跟其所有的"梭形相似点"某个属性的值有至少`min_groups`个不同值时,才会返回该起点及其"梭形相似点"。比如为"读者A"推荐"异地"书友时,需要设置`group_property`为读者的"城市"属性,`min_group`至少为2,选填项,不填代表不需要根据属性过滤 +- min_similars:"梭形相似点"的最少个数,只有当起点的"梭形相似点"数目大于或等于该值时,才会返回起点及其"梭形相似点",选填项,默认值为 1 +- top:返回一个起点的"梭形相似点"中相似度最高的 top 个,必填项,0 表示全部 +- group_property:与`min_groups`一起使用,当起点跟其所有的"梭形相似点"某个属性的值有至少`min_groups`个不同值时,才会返回该起点及其"梭形相似点"。比如为"读者 A"推荐"异地"书友时,需要设置`group_property`为读者的"城市"属性,`min_group`至少为 2,选填项,不填代表不需要根据属性过滤 - min_groups:与`group_property`一起使用,只有`group_property`设置时才有意义 -- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为10000 -- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为10000000 -- limit:返回的结果数目上限(一个起点及其"梭形相似点"算一个结果),选填项,默认为10 -- with_intermediary:是否返回起点及其"梭形相似点"共同关联的中间点,默认为false -- with_vertex,选填项,默认为false: - - true表示返回结果包含完整的顶点信息 - - false时表示只返回顶点id +- max_degree:查询过程中,单个顶点遍历的最大邻接边数目,选填项,默认为 10000 +- capacity:遍历过程中最大的访问的顶点数目,选填项,默认为 10000000 +- limit:返回的结果数目上限(一个起点及其"梭形相似点"算一个结果),选填项,默认为 10 +- with_intermediary:是否返回起点及其"梭形相似点"共同关联的中间点,默认为 false +- with_vertex,选填项,默认为 false: + - true 表示返回结果包含完整的顶点信息 + - false 时表示只返回顶点 id ##### 3.2.21.2 使用方法 ###### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/traversers/fusiformsimilarity +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/fusiformsimilarity ``` ###### Request Body @@ -2502,16 +2502,16 @@ POST http://localhost:8080/graphs/hugegraph/traversers/fusiformsimilarity #### 3.2.22 Vertices -##### 3.2.22.1 根据顶点的id列表,批量查询顶点 +##### 3.2.22.1 根据顶点的 id 列表,批量查询顶点 ###### Params -- ids:要查询的顶点id列表 +- ids:要查询的顶点 id 列表 ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/vertices?ids="1:marko"&ids="2:lop" +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/vertices?ids="1:marko"&ids="2:lop" ``` ###### Response Status @@ -2581,7 +2581,7 @@ GET http://localhost:8080/graphs/hugegraph/traversers/vertices?ids="1:marko"&ids ##### 3.2.22.2 获取顶点 Shard 信息 -通过指定的分片大小split_size,获取顶点分片信息(可以与 3.2.21.3 中的 Scan 配合使用来获取顶点)。 +通过指定的分片大小 split_size,获取顶点分片信息(可以与 3.2.21.3 中的 Scan 配合使用来获取顶点)。 ###### Params @@ -2590,7 +2590,7 @@ GET http://localhost:8080/graphs/hugegraph/traversers/vertices?ids="1:marko"&ids ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/vertices/shards?split_size=67108864 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/vertices/shards?split_size=67108864 ``` ###### Response Status @@ -2629,21 +2629,21 @@ GET http://localhost:8080/graphs/hugegraph/traversers/vertices/shards?split_size } ``` -##### 3.2.22.3 根据Shard信息批量获取顶点 +##### 3.2.22.3 根据 Shard 信息批量获取顶点 -通过指定的分片信息批量查询顶点(Shard信息的获取参见 3.2.21.2 Shard)。 +通过指定的分片信息批量查询顶点(Shard 信息的获取参见 3.2.21.2 Shard)。 ###### Params - start:分片起始位置,必填项 - end:分片结束位置,必填项 -- page:分页位置,选填项,默认为null,不分页;当page为“”时表示分页的第一页,从start指示的位置开始 -- page_limit:分页获取顶点时,一页中顶点数目的上限,选填项,默认为100000 +- page:分页位置,选填项,默认为 null,不分页;当 page 为“”时表示分页的第一页,从 start 指示的位置开始 +- page_limit:分页获取顶点时,一页中顶点数目的上限,选填项,默认为 100000 ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/vertices/scan?start=0&end=4294967295 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/vertices/scan?start=0&end=4294967295 ``` ###### Response Status @@ -2813,21 +2813,21 @@ GET http://localhost:8080/graphs/hugegraph/traversers/vertices/scan?start=0&end= ##### 3.2.22.4 适用场景 -- 按id列表查询顶点,可用于批量查询顶点,比如在path查询到多条路径之后,可以进一步查询某条路径的所有顶点属性。 +- 按 id 列表查询顶点,可用于批量查询顶点,比如在 path 查询到多条路径之后,可以进一步查询某条路径的所有顶点属性。 - 获取分片和按分片查询顶点,可以用来遍历全部顶点 #### 3.2.23 Edges -##### 3.2.23.1 根据边的id列表,批量查询边 +##### 3.2.23.1 根据边的 id 列表,批量查询边 ###### Params -- ids:要查询的边id列表 +- ids:要查询的边 id 列表 ###### Method & Url ``` -GET 
http://localhost:8080/graphs/hugegraph/traversers/edges?ids="S1:josh>1>>S2:lop"&ids="S1:josh>1>>S2:ripple" +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/edges?ids="S1:josh>1>>S2:lop"&ids="S1:josh>1>>S2:ripple" ``` ###### Response Status @@ -2873,7 +2873,7 @@ GET http://localhost:8080/graphs/hugegraph/traversers/edges?ids="S1:josh>1>>S2:l ##### 3.2.23.2 获取边 Shard 信息 -通过指定的分片大小split_size,获取边分片信息(可以与 3.2.22.3 中的 Scan 配合使用来获取边)。 +通过指定的分片大小 split_size,获取边分片信息(可以与 3.2.22.3 中的 Scan 配合使用来获取边)。 ###### Params @@ -2882,7 +2882,7 @@ GET http://localhost:8080/graphs/hugegraph/traversers/edges?ids="S1:josh>1>>S2:l ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/edges/shards?split_size=4294967295 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/edges/shards?split_size=4294967295 ``` ###### Response Status @@ -2927,19 +2927,19 @@ GET http://localhost:8080/graphs/hugegraph/traversers/edges/shards?split_size=42 ##### 3.2.23.3 根据 Shard 信息批量获取边 -通过指定的分片信息批量查询边(Shard信息的获取参见 3.2.22.2)。 +通过指定的分片信息批量查询边(Shard 信息的获取参见 3.2.22.2)。 ###### Params - start:分片起始位置,必填项 - end:分片结束位置,必填项 -- page:分页位置,选填项,默认为null,不分页;当page为“”时表示分页的第一页,从start指示的位置开始 -- page_limit:分页获取边时,一页中边数目的上限,选填项,默认为100000 +- page:分页位置,选填项,默认为 null,不分页;当 page 为“”时表示分页的第一页,从 start 指示的位置开始 +- page_limit:分页获取边时,一页中边数目的上限,选填项,默认为 100000 ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/edges/scan?start=0&end=3221225469 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/edges/scan?start=0&end=3221225469 ``` ###### Response Status @@ -3037,5 +3037,5 @@ GET http://localhost:8080/graphs/hugegraph/traversers/edges/scan?start=0&end=322 ##### 3.2.23.4 适用场景 -- 按id列表查询边,可用于批量查询边 +- 按 id 列表查询边,可用于批量查询边 - 获取分片和按分片查询边,可以用来遍历全部边 diff --git a/content/cn/docs/clients/restful-api/variable.md b/content/cn/docs/clients/restful-api/variable.md index ee2223583..5ca2ec0b9 100644 --- 
a/content/cn/docs/clients/restful-api/variable.md +++ b/content/cn/docs/clients/restful-api/variable.md @@ -6,14 +6,14 @@ weight: 11 ### 5.1 Variables -Variables可以用来存储有关整个图的数据,数据按照键值对的方式存取 +Variables 可以用来存储有关整个图的数据,数据按照键值对的方式存取 #### 5.1.1 创建或者更新某个键值对 ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/variables/name +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables/name ``` ##### Request Body @@ -43,7 +43,7 @@ PUT http://localhost:8080/graphs/hugegraph/variables/name ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/variables +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables ``` ##### Response Status @@ -65,7 +65,7 @@ GET http://localhost:8080/graphs/hugegraph/variables ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/variables/name +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables/name ``` ##### Response Status @@ -87,11 +87,11 @@ GET http://localhost:8080/graphs/hugegraph/variables/name ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/variables/name +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables/name ``` ##### Response Status ```json 204 -``` \ No newline at end of file +``` diff --git a/content/cn/docs/clients/restful-api/vertex.md b/content/cn/docs/clients/restful-api/vertex.md index 4b8a2f79b..0df58ecce 100644 --- a/content/cn/docs/clients/restful-api/vertex.md +++ b/content/cn/docs/clients/restful-api/vertex.md @@ -42,10 +42,17 @@ schema.indexLabel("personByAge").onV("person").by("age").range().ifNotExist().cr #### 2.1.1 创建一个顶点 +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/graph/vertices +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices ``` ##### Request Body @@ -82,10 +89,17 @@ POST http://localhost:8080/graphs/hugegraph/graph/vertices #### 2.1.2 创建多个顶点 +##### Params + 
+**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/graph/vertices/batch +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Request Body @@ -127,10 +141,18 @@ POST http://localhost:8080/graphs/hugegraph/graph/vertices/batch #### 2.1.3 更新顶点属性 +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 +- id: 顶点 id,需要包含引号,例如"1:marko" + ##### Method & Url ``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=append +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko"?action=append ``` ##### Request Body @@ -145,7 +167,7 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=appen } ``` -> 注意:属性的取值有三种类别,分别为single、set和list。single表示增加或更新属性值,set或list表示追加属性值。 +> 注意:属性的取值有三种类别,分别为 single、set 和 list。single 表示增加或更新属性值,set 或 list 表示追加属性值。 ##### Response Status @@ -175,13 +197,13 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=appen 批量更新顶点的属性时,可以选择多种更新策略,如下: - SUM: 数值累加 -- BIGGER: 原值和新值(数字、日期)取更大的 -- SMALLER: 原值和新值(数字、日期)取更小的 -- UNION: Set属性取并集 -- INTERSECTION: Set属性取交集 -- APPEND: List属性追加元素 +- BIGGER: 原值和新值 (数字、日期) 取更大的 +- SMALLER: 原值和新值 (数字、日期) 取更小的 +- UNION: Set 属性取并集 +- INTERSECTION: Set 属性取交集 +- APPEND: List 属性追加元素 - ELIMINATE: List/Set属性删除元素 -- OVERRIDE: 覆盖已有属性,如果新属性为null,则仍然使用旧属性 +- OVERRIDE: 覆盖已有属性,如果新属性为 null,则仍然使用旧属性 假设原顶点的属性如下: @@ -220,13 +242,20 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=appen 通过以下命令新增顶点: ```shell -curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"josh","age":32,"city":"Beijing","weight":0.1,"hobby":["reading","football"]}},{"label":"software","properties":{"name":"lop","lang":"java","price":328}}]' http:///127.0.0.1:8080/graphs/hugegraph/graph/vertices/batch +curl -H "Content-Type: application/json" -d 
'[{"label":"person","properties":{"name":"josh","age":32,"city":"Beijing","weight":0.1,"hobby":["reading","football"]}},{"label":"software","properties":{"name":"lop","lang":"java","price":328}}]' http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + ##### Method & Url ``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/batch +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Request Body @@ -310,21 +339,29 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/batch 结果分析如下: -- lang 属性未指定更新策略,直接用新值覆盖旧值,无论新值是否为null; -- price 属性指定 BIGGER 的更新策略,旧属性值为328,新属性值为299,所以仍然保留了旧属性值328; -- age 属性指定 OVERRIDE 更新策略,而新属性值中未传入age,相当于age为null,所以仍然保留了原属性值32; -- city 属性也指定了 OVERRIDE 更新策略,且新属性值不为null,所以覆盖了旧值; -- weight 属性指定了 SUM 更新策略,旧属性值为0.1,新属性值为0.2,最后的值为0.3; -- hobby 属性(基数为Set)指定了 UNION 更新策略,所以新值与旧值取了并集; +- lang 属性未指定更新策略,直接用新值覆盖旧值,无论新值是否为 null; +- price 属性指定 BIGGER 的更新策略,旧属性值为 328,新属性值为 299,所以仍然保留了旧属性值 328; +- age 属性指定 OVERRIDE 更新策略,而新属性值中未传入 age,相当于 age 为 null,所以仍然保留了原属性值 32; +- city 属性也指定了 OVERRIDE 更新策略,且新属性值不为 null,所以覆盖了旧值; +- weight 属性指定了 SUM 更新策略,旧属性值为 0.1,新属性值为 0.2,最后的值为 0.3; +- hobby 属性(基数为 Set)指定了 UNION 更新策略,所以新值与旧值取了并集; 其他更新策略的使用方式与此类似,此处不再详述。 #### 2.1.5 删除顶点属性 +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 +- id: 顶点 id,需要包含引号,例如"1:marko" + ##### Method & Url ``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=eliminate +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko"?action=eliminate ``` ##### Request Body @@ -338,7 +375,7 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=elimi } ``` -> 注意:这里会直接删除属性(删除key和所有value),无论其属性的取值是single、set或list。 +> 注意:这里会直接删除属性(删除 key 和所有 value),无论其属性的取值是 single、set 或 list。 ##### Response Status @@ -364,34 +401,41 @@ PUT 
http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=elimi ##### Params +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 + +**请求参数说明:** + - label: 顶点的类型 - properties: 属性键值对(查询属性的前提是该属性已经建立了索引) - limit: 查询结果的最大数目 - page: 分页的页号 -以上参数都是可选的,但如果提供了page参数,就必须同时提供limit参数,并且不能再提供其他参数。`label, properties`和`limit`之间可以任意组合。 +以上参数都是可选的,但如果提供了 page 参数,就必须同时提供 limit 参数,并且不能再提供其他参数。`label, properties`和`limit`之间可以任意组合。 -属性键值对由属性名称和属性值组成JSON格式的对象,可以使用多个属性键值对作为查询条件,属性值支持精确匹配和范围匹配,精确匹配的形式如`properties={"age":29}`,范围匹配的形式如`properties={"age":"P.gt(29)"}`,范围匹配支持以下表达式: +属性键值对由属性名称和属性值组成 JSON 格式的对象,可以使用多个属性键值对作为查询条件,属性值支持精确匹配和范围匹配,精确匹配的形式如`properties={"age":29}`,范围匹配的形式如`properties={"age":"P.gt(29)"}`,范围匹配支持以下表达式: -| 表达式 | 说明 | -|------------------------------------|-----------------------------| -| P.eq(number) | 属性值等于number的顶点 | -| P.neq(number) | 属性值不等于number的顶点 | -| P.lt(number) | 属性值小于number的顶点 | -| P.lte(number) | 属性值小于等于number的顶点 | -| P.gt(number) | 属性值大于number的顶点 | -| P.gte(number) | 属性值大于等于number的顶点 | -| P.between(number1,number2) | 属性值大于等于number1且小于number2的顶点 | -| P.inside(number1,number2) | 属性值大于number1且小于number2的顶点 | -| P.outside(number1,number2) | 属性值小于number1且大于number2的顶点 | -| P.within(value1,value2,value3,...) | 属性值等于任何一个给定value的顶点 | +| 表达式 | 说明 | +|------------------------------------|---------------------------------| +| P.eq(number) | 属性值等于 number 的顶点 | +| P.neq(number) | 属性值不等于 number 的顶点 | +| P.lt(number) | 属性值小于 number 的顶点 | +| P.lte(number) | 属性值小于等于 number 的顶点 | +| P.gt(number) | 属性值大于 number 的顶点 | +| P.gte(number) | 属性值大于等于 number 的顶点 | +| P.between(number1,number2) | 属性值大于等于 number1 且小于 number2 的顶点 | +| P.inside(number1,number2) | 属性值大于 number1 且小于 number2 的顶点 | +| P.outside(number1,number2) | 属性值小于 number1 且大于 number2 的顶点 | +| P.within(value1,value2,value3,...) 
| 属性值等于任何一个给定 value 的顶点 | **查询所有 age 为 29 且 label 为 person 的顶点** ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices?label=person&properties={"age":29}&limit=1 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices?label=person&properties={"age":29}&limit=1 ``` ##### Response Status @@ -418,18 +462,18 @@ GET http://localhost:8080/graphs/hugegraph/graph/vertices?label=person&propertie } ``` -**分页查询所有顶点,获取第一页(page不带参数值),限定3条** +**分页查询所有顶点,获取第一页(page 不带参数值),限定 3 条** 通过以下命令新增顶点: ```shell -curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"peter","age":29,"city":"Shanghai"}},{"label":"person","properties":{"name":"vadas","age":27,"city":"Hongkong"}}]' http://localhost:8080/graphs/hugegraph/graph/vertices/batch +curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"peter","age":29,"city":"Shanghai"}},{"label":"person","properties":{"name":"vadas","age":27,"city":"Hongkong"}}]' http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices?page&limit=3 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices?page&limit=3 ``` ##### Response Status @@ -485,12 +529,12 @@ GET http://localhost:8080/graphs/hugegraph/graph/vertices?page&limit=3 返回的 `body` 里面是带有下一页的页号信息的,`"page": "CIYxOnBldGVyAAAAAAAAAAM="`,在查询下一页的时候将该值赋给 `page` 参数。 -**分页查询所有顶点,获取下一页(page带上上一页返回的page值),限定3条** +**分页查询所有顶点,获取下一页(page 带上上一页返回的 page 值),限定 3 条** ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices?page=CIYxOnBldGVyAAAAAAAAAAM=&limit=3 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices?page=CIYxOnBldGVyAAAAAAAAAAM=&limit=3 ``` ##### Response Status @@ -539,14 +583,22 @@ GET http://localhost:8080/graphs/hugegraph/graph/vertices?page=CIYxOnBldGVyAAAAA } ``` -当`"page": null`时,表示已经没有下一页了(注:如果后端使用的是 Cassandra 
,为了提高性能,当返回的页数刚好是最后一页时,返回的 `page` 值可能不为空,但是如果用这个 `page` 值再请求下一页数据时,就会返回 `空数据` 和 `page = null`,其他情况也类似) +当`"page": null`时,表示已经没有下一页了(注:如果后端使用的是 Cassandra,为了提高性能,当返回的页数刚好是最后一页时,返回的 `page` 值可能不为空,但是如果用这个 `page` 值再请求下一页数据时,就会返回 `空数据` 和 `page = null`,其他情况也类似) -#### 2.1.7 根据Id获取顶点 +#### 2.1.7 根据 Id 获取顶点 + +##### Params + +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 +- id: 顶点 id,需要包含引号,例如"1:marko" ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko" ``` ##### Response Status @@ -569,18 +621,26 @@ GET http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" } ``` -#### 2.1.8 根据Id删除顶点 +#### 2.1.8 根据 Id 删除顶点 ##### Params +**路径参数说明:** + +- graphspace: 图空间名称 +- graph: 图名称 +- id: 顶点 id,需要包含引号,例如"1:marko" + +**请求参数说明:** + - label: 顶点类型,可选参数 -**仅根据Id删除顶点** +**仅根据 Id 删除顶点** ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko" ``` ##### Response Status @@ -589,14 +649,14 @@ DELETE http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" 204 ``` -**根据Label+Id删除顶点** +**根据 Label+Id 删除顶点** -通过指定Label参数和Id来删除顶点时,一般来说其性能比仅根据Id删除会更好。 +通过指定 Label 参数和 Id 来删除顶点时,一般来说其性能比仅根据 Id 删除会更好。 ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko"?label=person +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko"?label=person ``` ##### Response Status diff --git a/content/cn/docs/clients/restful-api/vertexlabel.md b/content/cn/docs/clients/restful-api/vertexlabel.md index e75bcc7a2..31ff5a7eb 100644 --- a/content/cn/docs/clients/restful-api/vertexlabel.md +++ b/content/cn/docs/clients/restful-api/vertexlabel.md @@ -6,27 +6,26 @@ weight: 3 ### 1.3 VertexLabel -假设已经创建好了1.1.3中列出来的 PropertyKeys +假设已经创建好了 1.1.3 中列出来的 PropertyKeys -Params说明 +Params 说明 -- 
id:顶点类型id值 +- id:顶点类型 id 值 - name:顶点类型名称,必填 -- id_strategy: 顶点类型的ID策略,主键ID、自动生成、自定义字符串、自定义数字、自定义UUID,默认主键ID +- id_strategy: 顶点类型的 ID 策略,主键 ID、自动生成、自定义字符串、自定义数字、自定义 UUID,默认主键 ID - properties: 顶点类型关联的属性类型 -- primary_keys: 主键属性,当ID策略为PRIMARY_KEY时必须有值,其他ID策略时必须为空; -- enable_label_index: 是否开启类型索引,默认关闭 -- index_names:顶点类型创建的索引,详情见3.4 +- primary_keys: 主键属性,当 ID 策略为 PRIMARY_KEY 时必须有值,其他 ID 策略时必须为空; +- enable_label_index:是否开启类型索引,默认关闭 +- index_names:顶点类型创建的索引,详情见 3.4 - nullable_keys:可为空的属性 - user_data:设置顶点类型的通用信息,作用同属性类型 - -#### 1.3.1 创建一个VertexLabel +#### 1.3.1 创建一个 VertexLabel ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/schema/vertexlabels +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels ``` ##### Request Body @@ -116,7 +115,7 @@ POST http://localhost:8080/graphs/hugegraph/schema/vertexlabels } ``` -#### 1.3.2 为已存在的VertexLabel添加properties或userdata,或者移除userdata(目前不支持移除properties) +#### 1.3.2 为已存在的 VertexLabel 添加 properties 或 userdata,或者移除 userdata(目前不支持移除 properties) ##### Params @@ -125,7 +124,7 @@ POST http://localhost:8080/graphs/hugegraph/schema/vertexlabels ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person?action=append +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels/person?action=append ``` ##### Request Body @@ -176,12 +175,12 @@ PUT http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person?action=app } ``` -#### 1.3.3 获取所有的VertexLabel +#### 1.3.3 获取所有的 VertexLabel ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels ``` ##### Response Status @@ -241,12 +240,12 @@ GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels } ``` -#### 1.3.4 根据name获取VertexLabel +#### 1.3.4 根据 name 获取 VertexLabel ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels/person ``` ##### Response Status @@ -282,14 +281,14 @@ GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person } ``` -#### 1.3.5 根据name删除VertexLabel +#### 1.3.5 根据 name 删除 VertexLabel 删除 VertexLabel 会导致删除对应的顶点以及相关的索引数据,会产生一个异步任务 ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels/person ``` ##### Response Status @@ -308,4 +307,4 @@ DELETE http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person 注: -> 可以通过`GET http://localhost:8080/graphs/hugegraph/tasks/1`(其中"1"是task_id)来查询异步任务的执行状态,更多[异步任务RESTful API](../task) \ No newline at end of file +> 可以通过`GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1`(其中"1"是 task_id)来查询异步任务的执行状态,更多[异步任务 RESTful API](../task) diff --git a/content/cn/docs/quickstart/hugegraph/hugegraph-server.md b/content/cn/docs/quickstart/hugegraph/hugegraph-server.md index d4e80d8fc..a0606b94d 100644 --- a/content/cn/docs/quickstart/hugegraph/hugegraph-server.md +++ b/content/cn/docs/quickstart/hugegraph/hugegraph-server.md @@ -17,11 +17,11 @@ Core 模块是 Tinkerpop 接口的实现,Backend 模块用于管理数据存 #### 2.1 安装 Java 11 (JDK 11) -请优先考虑在 Java 11 的环境上启动 `HugeGraph-Server`(在 1.5.0 版前,会保留对 Java 8 的基本兼容) +请考虑在 Java 11 的环境上启动 `HugeGraph-Server`(在 1.5.0 版前,会保留对 Java 8 的基本兼容) **在往下阅读之前先执行 `java -version` 命令确认 jdk 版本** -> 注:使用 Java 8 启动 HugeGraph-Server 会失去一些**安全性**的保障,也会降低性能相关指标 (请尽早升级/迁移) +> 注:使用 Java 8 启动 HugeGraph-Server 会失去一些**安全性**的保障,也会降低性能相关指标 (请尽早升级/迁移,1.7.0 不再支持) ### 3 部署 @@ -187,7 +187,9 @@ HugeGraphServer 启动时会连接后端存储并尝试检查后端存储版本 要使用分布式存储引擎,需要先部署 HugeGraph-PD 和 HugeGraph-Store,详见 [HugeGraph-PD 快速入门](/cn/docs/quickstart/hugegraph/hugegraph-pd/) 和 [HugeGraph-Store 快速入门](/cn/docs/quickstart/hugegraph/hugegraph-hstore/)。 -确保 PD 和 Store 服务均已启动后,修改 HugeGraph-Server 的 `hugegraph.properties` 配置: +确保 PD 和 Store 服务均已启动后 + +1. 
修改 HugeGraph-Server 的 `hugegraph.properties` 配置: ```properties backend=hstore @@ -198,10 +200,17 @@ task.scheduler_type=distributed pd.peers=127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688 ``` +2. 修改 HugeGraph-Server 的 `rest-server.properties` 配置: + +```properties +usePD=true +``` + 如果配置多个 HugeGraph-Server 节点,需要为每个节点修改 `rest-server.properties` 配置文件,例如: 节点 1(主节点): ```properties +usePD=true restserver.url=http://127.0.0.1:8081 gremlinserver.url=http://127.0.0.1:8181 @@ -214,6 +223,7 @@ server.role=master 节点 2(工作节点): ```properties +usePD=true restserver.url=http://127.0.0.1:8082 gremlinserver.url=http://127.0.0.1:8182 diff --git a/content/en/docs/clients/restful-api/_index.md b/content/en/docs/clients/restful-api/_index.md index 29e567509..355b4cbc3 100644 --- a/content/en/docs/clients/restful-api/_index.md +++ b/content/en/docs/clients/restful-api/_index.md @@ -4,9 +4,15 @@ linkTitle: "RESTful API" weight: 1 --- -HugeGraph-Server provides interfaces for clients to operate on graphs based on the HTTP protocol through the HugeGraph-API. These interfaces primarily include the ability to add, delete, modify, and query metadata and graph data, perform traversal algorithms, handle variables, and perform other graph-related operations. +> ⚠️ **Version compatibility notes** +> +> - HugeGraph 1.7.0+ introduces graphspaces, and REST paths follow `/graphspaces/{graphspace}/graphs/{graph}`. +> - HugeGraph 1.5.x and earlier still rely on the legacy `/graphs/{graph}` path, and the create/clone graph APIs require `Content-Type: text/plain`; 1.7.0+ expects JSON bodies. +> - The default graphspace name is `DEFAULT`, which you can use directly if you do not need multi-tenant isolation. +> - **Note**: Before version 1.5.0, the format of ids such as group/target was similar to -69:grant. 
After version 1.7.0, the id and name were consistent, such as admin [HugeGraph 1.5.x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0) -Expect the doc below, you can also use `swagger-ui` to visit the `RESTful API` by `localhost:8080/swagger-ui/index.html`. [Here is an example](/docs/quickstart/hugegraph/hugegraph-server#swaggerui-example) + +Besides the documentation below, you can also open `swagger-ui` at `localhost:8080/swagger-ui/index.html` to explore the RESTful API. [Here is an example](/docs/quickstart/hugegraph/hugegraph-server#swaggerui-example) [comment]: <> (- Graph Schema) diff --git a/content/en/docs/clients/restful-api/auth.md b/content/en/docs/clients/restful-api/auth.md index 3fb187fd5..e90b84089 100644 --- a/content/en/docs/clients/restful-api/auth.md +++ b/content/en/docs/clients/restful-api/auth.md @@ -17,6 +17,7 @@ Description: User 'boss' has read permission for people in the 'graph1' graph fr ##### Interface Description: The user authentication and access control interface includes 5 categories: UserAPI, GroupAPI, TargetAPI, BelongAPI, AccessAPI. +**Note** Before 1.5.0, the format of ids such as group/target was similar to -69:grant. After 1.7.0, the id and name were consistent. Such as admin [HugeGraph 1.5 x RESTful API](https://github.com/apache/incubator-hugegraph-doc/tree/release-1.5.0) ### 10.2 User (User) API The user interface includes APIs for creating users, deleting users, modifying users, and querying user-related information. @@ -47,7 +48,7 @@ Both user_name and user_password are required. ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/users +POST http://localhost:8080/graphspaces/DEFAULT/auth/users ``` ##### Response Status @@ -66,7 +67,7 @@ In the response message, the password is encrypted as ciphertext. 
"user_name": "boss", "user_creator": "admin", "user_phone": "182****9088", - "id": "-63:boss", + "id": "boss", "user_create": "2020-11-17 14:31:07.833" } ``` @@ -81,7 +82,7 @@ In the response message, the password is encrypted as ciphertext. ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/users/-63:test +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/users/test ``` ##### Response Status @@ -105,7 +106,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/users/-63:test ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/users/-63:test +PUT http://localhost:8080/graphspaces/DEFAULT/auth/users/test ``` ##### Request Body @@ -134,7 +135,7 @@ The returned result is the entire user object including the modified content. "user_name": "test", "user_creator": "admin", "user_phone": "183****9266", - "id": "-63:test", + "id": "test", "user_create": "2020-11-12 10:27:13.601" } ``` @@ -149,7 +150,7 @@ The returned result is the entire user object including the modified content. 
##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/users +GET http://localhost:8080/graphspaces/DEFAULT/auth/users ``` ##### Response Status @@ -168,7 +169,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users "user_update": "2020-11-11 11:41:12.254", "user_name": "admin", "user_creator": "system", - "id": "-63:admin", + "id": "admin", "user_create": "2020-11-11 11:41:12.254" } ] @@ -184,7 +185,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/users/-63:admin +GET http://localhost:8080/graphspaces/DEFAULT/auth/users/admin ``` ##### Response Status @@ -203,7 +204,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users/-63:admin "user_update": "2020-11-11 11:41:12.254", "user_name": "admin", "user_creator": "system", - "id": "-63:admin", + "id": "admin", "user_create": "2020-11-11 11:41:12.254" } ] @@ -215,7 +216,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/users/-63:admin ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/users/-63:boss/role +GET http://localhost:8080/graphspaces/DEFAULT/auth/users/boss/role ``` ##### Response Status @@ -266,7 +267,7 @@ The group interface includes APIs for creating groups, deleting groups, modifyin ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/groups +POST http://localhost:8080/graphspaces/DEFAULT/auth/groups ``` ##### Response Status @@ -298,7 +299,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/groups ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/groups/-69:grant ``` ##### Response Status @@ -322,7 +323,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/groups/-69:grant +PUT http://localhost:8080/graphspaces/DEFAULT/auth/groups/-69:grant ``` ##### Request Body @@ -363,7 
+364,7 @@ The returned result is the entire group object including the modified content. ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/groups +GET http://localhost:8080/graphspaces/DEFAULT/auth/groups ``` ##### Response Status @@ -398,7 +399,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/groups ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/groups/-69:all +GET http://localhost:8080/graphspaces/DEFAULT/auth/groups/-69:all ``` ##### Response Status @@ -459,7 +460,7 @@ The resource definition means: a vertex of type 'person' with the city property ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/targets +POST http://localhost:8080/graphspaces/DEFAULT/auth/targets ``` ##### Response Status @@ -498,7 +499,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/targets ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/targets/-77:gremlin ``` ##### Response Status @@ -522,7 +523,7 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/targets/-77:gremlin +PUT http://localhost:8080/graphspaces/DEFAULT/auth/targets/-77:gremlin ``` ##### Request Body @@ -577,7 +578,7 @@ The response contains the entire target group object, including the modified con ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/targets +GET http://localhost:8080/graphspaces/DEFAULT/auth/targets ``` ##### Response Status @@ -636,7 +637,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/targets ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/targets/-77:grant +GET http://localhost:8080/graphspaces/DEFAULT/auth/targets/-77:grant ``` ##### Response Status @@ -683,7 +684,7 @@ The API for associating roles includes creating, deleting, modifying, and queryi ```json { - "user": "-63:boss", + "user": 
"boss", "group": "-69:all" } ``` @@ -692,7 +693,7 @@ The API for associating roles includes creating, deleting, modifying, and queryi ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/belongs +POST http://localhost:8080/graphspaces/DEFAULT/auth/belongs ``` ##### Response Status @@ -708,8 +709,8 @@ POST http://localhost:8080/graphs/hugegraph/auth/belongs "belong_create": "2020-11-11 16:19:35.422", "belong_creator": "admin", "belong_update": "2020-11-11 16:19:35.422", - "id": "S-63:boss>-82>>S-69:all", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:all", + "user": "boss", "group": "-69:all" } ``` @@ -723,7 +724,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/belongs ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:grant +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/belongs/Sboss>-82>>S-69:grant ``` ##### Response Status @@ -749,7 +750,7 @@ An association of roles can only be modified for its description. 
The `user` and ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:grant +PUT http://localhost:8080/graphspaces/DEFAULT/auth/belongs/Sboss>-82>>S-69:grant ``` ##### Request Body @@ -774,8 +775,8 @@ The response includes the modified content as well as the entire association of "belong_create": "2020-11-12 10:40:21.720", "belong_creator": "admin", "belong_update": "2020-11-12 10:42:47.265", - "id": "S-63:boss>-82>>S-69:grant", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:grant", + "user": "boss", "group": "-69:grant" } ``` @@ -790,7 +791,7 @@ The response includes the modified content as well as the entire association of ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/belongs +GET http://localhost:8080/graphspaces/DEFAULT/auth/belongs ``` ##### Response Status @@ -808,8 +809,8 @@ GET http://localhost:8080/graphs/hugegraph/auth/belongs "belong_create": "2020-11-11 16:19:35.422", "belong_creator": "admin", "belong_update": "2020-11-11 16:19:35.422", - "id": "S-63:boss>-82>>S-69:all", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:all", + "user": "boss", "group": "-69:all" } ] @@ -825,7 +826,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/belongs ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:all +GET http://localhost:8080/graphspaces/DEFAULT/auth/belongs/Sboss>-82>>S-69:all ``` ##### Response Status @@ -841,8 +842,8 @@ GET http://localhost:8080/graphs/hugegraph/auth/belongs/S-63:boss>-82>>S-69:all "belong_create": "2020-11-11 16:19:35.422", "belong_creator": "admin", "belong_update": "2020-11-11 16:19:35.422", - "id": "S-63:boss>-82>>S-69:all", - "user": "-63:boss", + "id": "Sboss>-82>>S-69:all", + "user": "boss", "group": "-69:all" } ``` @@ -879,7 +880,7 @@ Access permissions: ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/auth/accesses +POST http://localhost:8080/graphspaces/DEFAULT/auth/accesses ``` ##### Response 
Status @@ -897,8 +898,8 @@ POST http://localhost:8080/graphs/hugegraph/auth/accesses "id": "S-69:all>-88>11>S-77:all", "access_update": "2020-11-11 15:54:54.008", "access_creator": "admin", - "group": "-69:all", - "target": "-77:all" + "group": "-69:all", + "target": "-77:all" } ``` @@ -911,7 +912,7 @@ POST http://localhost:8080/graphs/hugegraph/auth/accesses ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77:all +DELETE http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>12>S-77:all ``` ##### Response Status @@ -927,7 +928,8 @@ DELETE http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77 ``` #### 10.6.3 Modify Authorization -Authorization can only be modified for its description. User group, resource, and permission cannot be modified. If you need to modify the relationship of the authorization, you can delete the original authorization relationship and create a new one. + +Authorization can only be modified for its description. User group, resource, and permission cannot be modified. If you need to modify the authorization relationship, delete the original authorization and create a new one. ##### Params @@ -936,14 +938,16 @@ Authorization can only be modified for its description. User group, resource, an ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>12>S-77:all +PUT http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>12>S-77:all ``` ##### Request Body + Modify access_description + ```json { - "access_description": "test" + "access_description": "test" } ``` @@ -954,17 +958,19 @@ Modify access_description ``` ##### Response Body -#### Return Result Including Modified Content of the Entire User Group Object + +The response includes the modified content as well as the entire authorization object. 
+ ```json { - "access_description": "test", - "access_permission": "WRITE", - "access_create": "2020-11-12 10:12:03.074", - "id": "S-69:all>-88>12>S-77:all", - "access_update": "2020-11-12 10:16:18.637", - "access_creator": "admin", - "group": "-69:all", - "target": "-77:all" + "access_description": "test", + "access_permission": "WRITE", + "access_create": "2020-11-12 10:12:03.074", + "id": "S-69:all>-88>12>S-77:all", + "access_update": "2020-11-12 10:16:18.637", + "access_creator": "admin", + "group": "-69:all", + "target": "-77:all" } ``` @@ -977,7 +983,7 @@ Modify access_description ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/accesses +GET http://localhost:8080/graphspaces/DEFAULT/auth/accesses ``` ##### Response Status @@ -990,17 +996,17 @@ GET http://localhost:8080/graphs/hugegraph/auth/accesses ```json { - "accesses": [ - { - "access_permission": "READ", - "access_create": "2020-11-11 15:54:54.008", - "id": "S-69:all>-88>11>S-77:all", - "access_update": "2020-11-11 15:54:54.008", - "access_creator": "admin", - "group": "-69:all", - "target": "-77:all" - } - ] + "accesses": [ + { + "access_permission": "READ", + "access_create": "2020-11-11 15:54:54.008", + "id": "S-69:all>-88>11>S-77:all", + "access_update": "2020-11-11 15:54:54.008", + "access_creator": "admin", + "group": "-69:all", + "target": "-77:all" + } + ] } ``` @@ -1013,7 +1019,7 @@ GET http://localhost:8080/graphs/hugegraph/auth/accesses ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>11>S-77:all +GET http://localhost:8080/graphspaces/DEFAULT/auth/accesses/S-69:all>-88>11>S-77:all ``` ##### Response Status @@ -1026,12 +1032,170 @@ GET http://localhost:8080/graphs/hugegraph/auth/accesses/S-69:all>-88>11>S-77:al ```json { - "access_permission": "READ", - "access_create": "2020-11-11 15:54:54.008", - "id": "S-69:all>-88>11>S-77:all", - "access_update": "2020-11-11 15:54:54.008", - "access_creator": "admin", + 
"access_permission": "READ", + "access_create": "2020-11-11 15:54:54.008", + "id": "S-69:all>-88>11>S-77:all", + "access_update": "2020-11-11 15:54:54.008", + "access_creator": "admin", "group": "-69:all", "target": "-77:all" } ``` + +### 10.7 Graphspace Manager (Manager) API + +> **Note**: Before using the following APIs, you need to create a graphspace first. For example, create a graphspace named `gs1` via the [Graphspace API](../graphspace). The examples below assume that `gs1` already exists. + +1. The graphspace manager API is used to grant/revoke manager roles for users at the graphspace level, and to query the roles of the current user or other users in a graphspace. Supported role types include `SPACE`, `SPACE_MEMBER`, and `ADMIN`. + +#### 10.7.1 Check whether the current login user has a specific role + +##### Params + +- type: Role type to check, optional + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1/auth/managers/check?type=WRITE +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +"true" +``` + +The API returns the string `true` or `false` indicating whether the current user has the given role. + +#### 10.7.2 List graphspace managers + +##### Params + +- type: Role type, optional, used to filter by role + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1/auth/managers?type=SPACE +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "managers": [ + { + "user": "admin", + "type": "SPACE", + "create_time": "2024-01-10 09:30:00" + } + ] +} +``` + +#### 10.7.3 Grant/create a graphspace manager + +- The following example grants user `boss` the `SPACE_MEMBER` role in graphspace `gs1`. 
+ +##### Request Body + +```json +{ + "user": "boss", + "type": "SPACE_MEMBER" +} +``` + +##### Method & Url + +``` +POST http://localhost:8080/graphspaces/gs1/auth/managers +``` + +##### Response Status + +```json +201 +``` + +##### Response Body + +```json +{ + "user": "boss", + "type": "SPACE_MEMBER", + "manager_creator": "admin", + "manager_create": "2024-01-10 09:45:12" +} +``` + +#### 10.7.4 Revoke graphspace manager privileges + +- The following example revokes the `SPACE_MEMBER` role of user `boss` in graphspace `gs1`. + +##### Params + +- user: User ID to revoke +- type: Role type to revoke + +##### Method & Url + +``` +DELETE http://localhost:8080/graphspaces/gs1/auth/managers?user=boss&type=SPACE_MEMBER +``` + +##### Response Status + +```json +204 +``` + +##### Response Body + +```json +1 +``` + +#### 10.7.5 Query roles of a specific user in a graphspace + +##### Params + +- user: User ID + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1/auth/managers/role?user=boss +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "roles": { + "boss": [ + "READ", + "SPACE_MEMBER" + ] + } +} +``` diff --git a/content/en/docs/clients/restful-api/cypher.md b/content/en/docs/clients/restful-api/cypher.md index 7ce3a9727..ba120e2c7 100644 --- a/content/en/docs/clients/restful-api/cypher.md +++ b/content/en/docs/clients/restful-api/cypher.md @@ -10,29 +10,36 @@ weight: 15 ##### Method & Url -```javascript -GET /graphs/{graph}/cypher?cypher={cypher} +```http +GET /graphspaces/{graphspace}/graphs/{graph}/cypher?cypher={cypher} ``` ##### Params + +**Path parameters** + +- graphspace: Graphspace name - graph: Graph name + +**Query parameters** + - cypher: Cypher statement ##### Example -```javascript -GET http://localhost:8080/graphs/hugecypher1/cypher?cypher=match(n:person) return n.name as name order by n.name limit 1 +```http +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugecypher1/cypher?cypher=match(n:person) return n.name as name order by n.name limit 1 ``` ##### Response Status -```javascript +```json 200 ``` ##### Response Body -```javascript +```json { "requestId": "766b9f48-2f10-40d9-951a-3027d0748ab7", "status": { @@ -51,8 +58,6 @@ GET http://localhost:8080/graphs/hugecypher1/cypher?cypher=match(n:person) retur } } } - - ``` #### 9.1.2 Sending a cypher statement (POST) to HugeGraphServer for synchronous execution @@ -60,11 +65,15 @@ GET http://localhost:8080/graphs/hugecypher1/cypher?cypher=match(n:person) retur ##### Method & Url -```javascript -POST /graphs/{graph}/cypher +```http +POST /graphspaces/{graphspace}/graphs/{graph}/cypher ``` ##### Params + +**Path parameters** + +- graphspace: Graphspace name - graph: Graph name ##### Body @@ -77,8 +86,8 @@ Note: ##### Example -```javascript -POST http://localhost:8080/graphs/hugecypher1/cypher +```http +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugecypher1/cypher ``` ###### Request Body @@ -88,12 +97,12 @@ match(n:person) return n.name as name order by n.name limit 1 ``` ##### Response Status -```javascript +```json 200 ``` ##### Response Body -```javascript +```json { "requestId": "f096bee0-e249-498f-b5a3-ea684fc84f57", "status": { @@ -112,6 +121,4 @@ match(n:person) return n.name as name order by n.name limit 1 } } } - - ``` diff --git a/content/en/docs/clients/restful-api/edge.md b/content/en/docs/clients/restful-api/edge.md index b990a54a2..aaff63967 100644 --- a/content/en/docs/clients/restful-api/edge.md +++ b/content/en/docs/clients/restful-api/edge.md @@ -76,8 +76,8 @@ g = graph.traversal() ##### Method & Url -``` -POST http://localhost:8080/graphs/hugegraph/graph/edges +```http +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges ``` ##### Request Body @@ -138,8 +138,8 @@ POST http://localhost:8080/graphs/hugegraph/graph/edges ##### Method & Url -``` -POST 
http://localhost:8080/graphs/hugegraph/graph/edges/batch +```http +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/batch ``` ##### Request Body @@ -205,8 +205,8 @@ POST http://localhost:8080/graphs/hugegraph/graph/edges/batch ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=append +```http +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=append ``` ##### Request Body @@ -267,8 +267,8 @@ PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action ##### Method & Url -``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/edges/batch +```http +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/batch ``` ##### Request Body @@ -368,8 +368,8 @@ PUT http://127.0.0.1:8080/graphs/hugegraph/graph/edges/batch ##### Method & Url -``` -PUT http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=eliminate +```http +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop?action=eliminate ``` ##### Request Body @@ -442,8 +442,8 @@ Key-value pairs of properties consist of the property name and value in JSON for ##### Method & Url -``` -GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?vertex_id="1:marko"&label=knows&properties={"date":"P.within(\"20160111\")"} +```http +GET http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges?vertex_id="1:marko"&label=knows&properties={"date":"P.within(\"20160111\")"} ``` ##### Response Status @@ -478,8 +478,8 @@ GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?vertex_id="1:marko"&label ##### Method & Url -``` -GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?page&limit=2 +```http +GET http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges?page&limit=2 ``` ##### Response Status @@ -530,8 +530,8 @@ The returned body contains the page number information for the 
next page, `"page ##### Method & Url -``` -GET http://127.0.0.1:8080/graphs/hugegraph/graph/edges?page=EoYxOm1hcmtvgggCAIQyOmxvcAAAAAAAAAAC&limit=2 +```http +GET http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges?page=EoYxOm1hcmtvgggCAIQyOmxvcAAAAAAAAAAC&limit=2 ``` ##### Response Status @@ -578,8 +578,8 @@ When `"page": null` is returned, it indicates that there are no more pages avail ##### Method & Url -``` -GET http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop +```http +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ``` ##### Response Status @@ -623,8 +623,8 @@ GET http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ##### Method & Url -``` -DELETE http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop +```http +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>2>>S2:lop ``` ##### Response Status @@ -639,8 +639,8 @@ In general, specifying the Label parameter along with the ID to delete an edge w ##### Method & Url -``` -DELETE http://localhost:8080/graphs/hugegraph/graph/edges/S1:marko>1>>S1:vadas?label=knows +```http +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/edges/S1:marko>1>>S1:vadas?label=knows ``` ##### Response Status diff --git a/content/en/docs/clients/restful-api/edgelabel.md b/content/en/docs/clients/restful-api/edgelabel.md index 0d4cb1dd8..4906c687f 100644 --- a/content/en/docs/clients/restful-api/edgelabel.md +++ b/content/en/docs/clients/restful-api/edgelabel.md @@ -24,7 +24,7 @@ Params Explanation ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/schema/edgelabels +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels ``` ##### Request Body @@ -133,7 +133,7 @@ Additionally, when the edge has a property called "createdTime" and you want to ##### Method & Url ``` -PUT 
http://localhost:8080/graphs/hugegraph/schema/edgelabels/created?action=append +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels/created?action=append ``` ##### Request Body @@ -186,7 +186,7 @@ PUT http://localhost:8080/graphs/hugegraph/schema/edgelabels/created?action=appe ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/edgelabels +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels ``` ##### Response Status @@ -248,7 +248,7 @@ GET http://localhost:8080/graphs/hugegraph/schema/edgelabels ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/edgelabels/created +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels/created ``` ##### Response Status @@ -291,7 +291,7 @@ Deleting an EdgeLabel will result in the deletion of corresponding edges and rel ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/edgelabels/created +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/edgelabels/created ``` ##### Response Status @@ -310,4 +310,4 @@ DELETE http://localhost:8080/graphs/hugegraph/schema/edgelabels/created Note: -> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). +> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). 
diff --git a/content/en/docs/clients/restful-api/graphs.md b/content/en/docs/clients/restful-api/graphs.md index 3ce4bb5f6..913d8ae2a 100644 --- a/content/en/docs/clients/restful-api/graphs.md +++ b/content/en/docs/clients/restful-api/graphs.md @@ -6,12 +6,20 @@ weight: 12 ### 6.1 Graphs -#### 6.1.1 List all graphs +**Important Reminder**: Since HugeGraph 1.7.0, dynamic graph creation must enable authentication mode. For non-authentication mode, please refer to [Graph Configuration File](https://hugegraph.apache.org/docs/config/config-guide/#4-hugegraphproperties) to statically create graphs through configuration files. + +#### 6.1.1 List all graphs in the graphspace + +##### Params + +**Path parameters** + +- graphspace: Graphspace name ##### Method & Url ``` -GET http://localhost:8080/graphs +GET http://localhost:8080/graphspaces/DEFAULT/graphs ``` ##### Response Status @@ -33,10 +41,17 @@ GET http://localhost:8080/graphs #### 6.1.2 Get details of the graph +##### Params + +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name + ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph ``` ##### Response Status @@ -54,22 +69,25 @@ GET http://localhost:8080/graphs/hugegraph } ``` -#### 6.1.3 Clear all data of a graph, include: schema, vertex, edge and index .etc.,**This operation - -requires administrator privileges** +#### 6.1.3 Clear all data of a graph, include: schema, vertex, edge and index, **This operation requires administrator privileges** ##### Params -Since emptying the graph is a dangerous operation, we have added parameters for confirmation to the -API to -avoid false calls by users: +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name + +**Query parameters** + +Since emptying the graph is a dangerous operation, we have added parameters for confirmation to the API to avoid false calls by users: - confirm_message: default by `I'm sure to 
delete all data` ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/clear?confirm_message=I%27m+sure+to+delete+all+data +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/clear?confirm_message=I%27m+sure+to+delete+all+data ``` ##### Response Status @@ -78,31 +96,38 @@ DELETE http://localhost:8080/graphs/hugegraph/clear?confirm_message=I%27m+sure+t 204 ``` -#### 6.1.4 Clone graph,**this operation requires administrator privileges** +#### 6.1.4 Clone graph, **this operation requires administrator privileges** ##### Params -- clone_graph_name: name of an existed graph. - To clone from an existing graph, the user can choose to transfer the configuration file, - which will replace the configuration in the existing graph +**Path parameters** + +- graphspace: Graphspace name +- graph: Name of the new graph to create + +**Query parameters** + +- clone_graph_name: name of an existed graph. To clone from an existing graph, the user can choose to transfer the configuration file, which will replace the configuration in the existing graph ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph_clone?clone_graph_name=hugegraph +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?clone_graph_name=hugegraph ``` ##### Request Body [Optional] -Clone a `non-auth` mode graph: (Must set body `Context-Type=text/plain`) +Clone a `non-auth` mode graph (set `Content-Type: application/json`) -```properties -gremlin.graph=org.apache.hugegraph.HugeFactory -backend=rocksdb -serializer=binary -store=hugegraph_clone -rocksdb.data_path=./rks-data-xx -rocksdb.wal_path=./rks-data-xx +```json +{ + "gremlin.graph": "org.apache.hugegraph.HugeFactory", + "backend": "rocksdb", + "serializer": "binary", + "store": "hugegraph", + "rocksdb.data_path": "./rks-data-xx", + "rocksdb.wal_path": "./rks-data-xx" +} ``` > Note: @@ -124,25 +149,34 @@ rocksdb.wal_path=./rks-data-xx } ``` -#### 6.1.5 Create graph,**this operation requires administrator 
privileges** +#### 6.1.5 Create graph, **this operation requires administrator privileges** + +##### Params + +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph2 +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph2 ``` ##### Request Body -create a non-auth graph: (Must set body `Context-Type=text/plain`) +Create a non-auth graph (set `Content-Type: application/json`) -```properties -gremlin.graph=org.apache.hugegraph.HugeFactory -backend=rocksdb -serializer=binary -store=hugegraph2 -rocksdb.data_path=./rks-data-xx -rocksdb.wal_path=./rks-data-xx +```json +{ + "gremlin.graph": "org.apache.hugegraph.HugeFactory", + "backend": "rocksdb", + "serializer": "binary", + "store": "hugegraph2", + "rocksdb.data_path": "./rks-data-xx", + "rocksdb.wal_path": "./rks-data-xx" +} ``` > Note: @@ -164,20 +198,25 @@ rocksdb.wal_path=./rks-data-xx } ``` -#### 6.1.6 Delete graph and it's data +#### 6.1.6 Delete graph and its data ##### Params -Since deleting a graph is a dangerous operation, we have added parameters for confirmation to the -API to -avoid false calls by users: +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name + +**Query parameters** + +Since deleting a graph is a dangerous operation, we have added parameters for confirmation to the API to avoid false calls by users: - confirm_message: default by `I'm sure to drop the graph` ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph_clone?confirm_message=I%27m%20sure%20to%20drop%20the%20graph +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph_clone?confirm_message=I%27m%20sure%20to%20drop%20the%20graph ``` ##### Response Status @@ -186,14 +225,23 @@ DELETE http://localhost:8080/graphs/hugegraph_clone?confirm_message=I%27m%20sure 204 ``` +> Note: For HugeGraph 1.5.0 and earlier versions, if you need to create or drop a graph, please still use the legacy `text/plain` 
(properties) style request body instead of JSON. + ### 6.2 Conf -#### 6.2.1 Get configuration for a graph,**This operation requires administrator privileges** +#### 6.2.1 Get configuration for a graph, **This operation requires administrator privileges** + +##### Params + +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ```javascript -GET http://localhost:8080/graphs/hugegraph/conf +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/conf ``` ##### Response Status @@ -247,12 +295,19 @@ Under normal circumstances, the graph mode is None. When you need to restore the you need to temporarily modify the graph mode to Restoring or Merging as needed. When you complete the restore, change the graph mode to None. -#### 6.3.1 Get graph mode. +#### 6.3.1 Get graph mode + +##### Params + +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/mode +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/mode ``` ##### Response Status @@ -273,10 +328,17 @@ GET http://localhost:8080/graphs/hugegraph/mode #### 6.3.2 Modify graph mode. **This operation requires administrator privileges** +##### Params + +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name + ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/mode +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/mode ``` ##### Request Body @@ -301,16 +363,19 @@ PUT http://localhost:8080/graphs/hugegraph/mode } ``` -#### 6.3.3 Get graph's read mode. 
+#### 6.3.3 Get graph's read mode ##### Params -- name: name of a graph +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph_read_mode +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph_read_mode ``` ##### Response Status @@ -331,12 +396,15 @@ GET http://localhost:8080/graphs/hugegraph/graph_read_mode ##### Params -- name: name of a graph +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/graph_read_mode +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph_read_mode ``` ##### Request Body @@ -367,12 +435,15 @@ PUT http://localhost:8080/graphs/hugegraph/graph_read_mode ##### Params -- name: name of a graph +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/snapshot_create +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/snapshot_create ``` ##### Response Status @@ -393,12 +464,15 @@ PUT http://localhost:8080/graphs/hugegraph/snapshot_create ##### Params -- name: name of a graph +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/snapshot_resume +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/snapshot_resume ``` ##### Response Status @@ -417,16 +491,19 @@ PUT http://localhost:8080/graphs/hugegraph/snapshot_resume ### 6.5 Compact -#### 6.5.1 Manually compact graph,**This operation requires administrator privileges** +#### 6.5.1 Manually compact graph, **This operation requires administrator privileges** ##### Params -- name: name of a graph +**Path parameters** + +- graphspace: Graphspace name +- graph: Graph name ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/compact +PUT 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/compact ``` ##### Response Status diff --git a/content/en/docs/clients/restful-api/graphspace.md b/content/en/docs/clients/restful-api/graphspace.md new file mode 100644 index 000000000..15eb1a91b --- /dev/null +++ b/content/en/docs/clients/restful-api/graphspace.md @@ -0,0 +1,281 @@ +--- +title: "Graphspace API" +linkTitle: "Graphspace" +weight: 1 +--- + +### 2.0 Graphspace + +HugeGraph implements multi-tenancy through graph spaces, which isolate compute/storage resources per tenant. + +**Prerequisites** + +1. Graphspace currently only works in HStore mode. +2. In non-HStore mode you can only use the default graphspace `DEFAULT`; creating/deleting/updating other graphspaces is not supported. +3. Set `usePD=true` in `rest-server.properties` and `backend=hstore` in `hugegraph.properties`. +4. Graphspace enables strict authentication by default (default credential: `admin:pa`). Change the password immediately to avoid unauthorized access. + +#### 2.0.1 Create a graphspace + +##### Method & Url + +``` +POST http://localhost:8080/graphspaces +``` + +##### Request Body + +Note: CPU/memory and Kubernetes-related capabilities are not publicly available yet. 
+ +| Name | Required | Type | Default | Range/Note | Description | +|------------------------------|----------|---------|---------|--------------------------------------------------------------------------------|-----------------------------------------------------------------------------------| +| name | Yes | String | | Lowercase letters, digits, underscore; must start with a letter; max length 48 | Graphspace name | +| description | Yes | String | | | Description | +| cpu_limit | Yes | Int | | > 0 | CPU cores for the graphspace | +| memory_limit | Yes | Int | | > 0 (GB) | Memory quota in GB | +| storage_limit | Yes | Int | | > 0 | Maximum disk usage | +| compute_cpu_limit | No | Int | 0 | >= 0 | Extra HugeGraph-Computer CPU cores; falls back to `cpu_limit` if unset or 0 | +| compute_memory_limit | No | Int | 0 | >= 0 | Extra HugeGraph-Computer memory in GB; falls back to `memory_limit` if unset or 0 | +| oltp_namespace | Yes | String | | | Kubernetes namespace for OLTP HugeGraph-Server | +| olap_namespace | Yes | String | | Resources are merged when identical to `oltp_namespace` | Kubernetes namespace for OLAP / HugeGraph-Computer | +| storage_namespace | Yes | String | | | Kubernetes namespace for HugeGraph-Store | +| operator_image_path | No | String | | | HugeGraph-Computer operator image registry | +| internal_algorithm_image_url | No | String | | | HugeGraph-Computer algorithm image registry | +| max_graph_number | Yes | Int | | > 0 | Maximum number of graphs that can be created inside the graphspace | +| max_role_number | Yes | Int | | > 0 | Maximum number of roles that can be created inside the graphspace | +| auth | No | Boolean | false | true / false | Whether to enable authentication for the graphspace | +| configs | No | Map | | | Additional configuration | + +```json +{ + "name": "gs1", + "description": "1st graph space", + "max_graph_number": 100, + "cpu_limit": 1000, + "memory_limit": 8192, + "storage_limit": 1000000, + "max_role_number": 10, + 
"auth": true, + "configs": {} +} +``` + +##### Response Status + +```json +201 +``` + +##### Response Body + +```json +{ + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 1000, + "memory_limit": 8192, + "storage_limit": 1000000, + "compute_cpu_limit": 0, + "compute_memory_limit": 0, + "oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "storage_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "max_graph_number": 100, + "max_role_number": 10, + "cpu_used": 0, + "memory_used": 0, + "storage_used": 0, + "graph_number_used": 0, + "role_number_used": 0, + "auth": true +} +``` + +#### 2.0.2 List all graphspaces + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "graphSpaces": [ + "gs1", + "DEFAULT" + ] +} +``` + +#### 2.0.3 Get graphspace details + +##### Params + +**Path parameters** + +- graphspace: Graphspace name + +##### Method & Url + +``` +GET http://localhost:8080/graphspaces/gs1 +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 1000, + "memory_limit": 8192, + "storage_limit": 1000000, + "oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "storage_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "compute_cpu_limit": 0, + "compute_memory_limit": 0, + "max_graph_number": 100, + "max_role_number": 10, + "cpu_used": 0, + "memory_used": 0, + "storage_used": 0, + "graph_number_used": 0, + "role_number_used": 0, + "auth": true +} +``` + 
+#### 2.0.4 Update a graphspace + +> `auth` cannot be changed once a graphspace is created. + +##### Params + +**Path parameter** + +- graphspace: Graphspace name + +**Request parameters** + +- action: Must be `"update"` +- update: Container for the actual fields to update (see table below) + +| Name | Required | Type | Range/Note | Description | +|------------------------------|----------|--------|---------------------------------------------------------|-----------------------------------------------------------------------------------| +| name | Yes | String | | Graphspace name | +| description | Yes | String | | Description | +| cpu_limit | Yes | Int | > 0 | CPU cores for OLTP HugeGraph-Server | +| memory_limit | Yes | Int | > 0 (GB) | Memory quota (GB) for OLTP HugeGraph-Server | +| storage_limit | Yes | Int | > 0 | Maximum disk usage | +| compute_cpu_limit | No | Int | >= 0 | Extra HugeGraph-Computer CPU cores; falls back to `cpu_limit` if unset or 0 | +| compute_memory_limit | No | Int | >= 0 | Extra HugeGraph-Computer memory in GB; falls back to `memory_limit` if unset or 0 | +| oltp_namespace | Yes | String | | Kubernetes namespace for OLTP HugeGraph-Server | +| olap_namespace | Yes | String | Resources are merged when identical to `oltp_namespace` | Kubernetes namespace for OLAP | +| storage_namespace | Yes | String | | Kubernetes namespace for HugeGraph-Store | +| operator_image_path | No | String | | HugeGraph-Computer operator image registry | +| internal_algorithm_image_url | No | String | | HugeGraph-Computer algorithm image registry | +| max_graph_number | Yes | Int | > 0 | Maximum number of graphs | +| max_role_number | Yes | Int | > 0 | Maximum number of roles | + +##### Method & Url + +``` +PUT http://localhost:8080/graphspaces/gs1 +``` + +##### Request Body + +```json +{ + "action": "update", + "update": { + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 2000, + "memory_limit": 40960, + "storage_limit": 2048, + 
"oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "max_graph_number": 1000, + "max_role_number": 100 + } +} +``` + +##### Response Status + +```json +200 +``` + +##### Response Body + +```json +{ + "name": "gs1", + "description": "1st graph space", + "cpu_limit": 2000, + "memory_limit": 40960, + "storage_limit": 2048, + "oltp_namespace": "hugegraph-server", + "olap_namespace": "hugegraph-server", + "storage_namespace": "hugegraph-server", + "operator_image_path": "127.0.0.1/hugegraph-registry/hugegraph-computer-operator:3.1.1", + "internal_algorithm_image_url": "127.0.0.1/hugegraph-registry/hugegraph-computer-algorithm:3.1.1", + "compute_cpu_limit": 0, + "compute_memory_limit": 0, + "max_graph_number": 1000, + "max_role_number": 100, + "cpu_used": 0, + "memory_used": 0, + "storage_used": 0, + "graph_number_used": 0, + "role_number_used": 0, + "auth": true +} +``` + +#### 2.0.5 Delete a graphspace + +##### Params + +**Path parameter** + +- graphspace: Graphspace name + +##### Method & Url + +``` +DELETE http://localhost:8080/graphspaces/gs1 +``` + +##### Response Status + +```json +204 +``` + +> Warning: deleting a graphspace releases all resources that belong to it. 
+ diff --git a/content/en/docs/clients/restful-api/gremlin.md b/content/en/docs/clients/restful-api/gremlin.md index 0529a9ce4..f78ad082c 100644 --- a/content/en/docs/clients/restful-api/gremlin.md +++ b/content/en/docs/clients/restful-api/gremlin.md @@ -188,7 +188,7 @@ Note: ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/jobs/gremlin +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/gremlin ``` **Querying vertices** @@ -224,7 +224,7 @@ Note: Note: -> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). +> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). **Querying edges** @@ -255,4 +255,4 @@ Note: Note: -> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphs/hugegraph/tasks/2` (where "2" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). +> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2` (where "2" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). 
diff --git a/content/en/docs/clients/restful-api/indexlabel.md b/content/en/docs/clients/restful-api/indexlabel.md index f98f39b7a..74320d37d 100644 --- a/content/en/docs/clients/restful-api/indexlabel.md +++ b/content/en/docs/clients/restful-api/indexlabel.md @@ -13,7 +13,7 @@ Assuming PropertyKeys from version 1.1.3, VertexLabels from version 1.2.3, and E ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/schema/indexlabels +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels ``` ##### Request Body @@ -59,7 +59,7 @@ POST http://localhost:8080/graphs/hugegraph/schema/indexlabels ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/indexlabels +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels ``` ##### Response Status @@ -123,7 +123,7 @@ GET http://localhost:8080/graphs/hugegraph/schema/indexlabels ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels/personByCity ``` ##### Response Status @@ -154,7 +154,7 @@ Deleting an IndexLabel will result in the deletion of related index data. This o ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/indexlabels/personByCity ``` ##### Response Status @@ -173,4 +173,4 @@ DELETE http://localhost:8080/graphs/hugegraph/schema/indexlabels/personByCity Note: -> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphs/hugegraph/tasks/1` (where "1" is the task_id). For more information, refer to the [Asynchronous Task RESTful API](../task). +> You can query the execution status of an asynchronous task by using `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id). 
For more information, refer to the [Asynchronous Task RESTful API](../task). diff --git a/content/en/docs/clients/restful-api/propertykey.md b/content/en/docs/clients/restful-api/propertykey.md index 71d503963..90c76414c 100644 --- a/content/en/docs/clients/restful-api/propertykey.md +++ b/content/en/docs/clients/restful-api/propertykey.md @@ -23,7 +23,7 @@ Request Body Field Description: ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/schema/propertykeys +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys ``` ##### Request Body @@ -72,7 +72,7 @@ POST http://localhost:8080/graphs/hugegraph/schema/propertykeys ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/schema/propertykeys/age?action=append +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys/age?action=append ``` ##### Request Body @@ -121,7 +121,7 @@ PUT http://localhost:8080/graphs/hugegraph/schema/propertykeys/age?action=append ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/propertykeys +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys ``` ##### Response Status @@ -200,7 +200,7 @@ GET http://localhost:8080/graphs/hugegraph/schema/propertykeys ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/propertykeys/age +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys/age ``` Where `age` is the name of the PropertyKey to be retrieved. @@ -236,7 +236,7 @@ Where `age` is the name of the PropertyKey to be retrieved. ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/propertykeys/age +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/propertykeys/age ``` Where `age` is the name of the PropertyKey to be deleted. 
diff --git a/content/en/docs/clients/restful-api/rank.md b/content/en/docs/clients/restful-api/rank.md index 6a7525fd8..e1dd71a4c 100644 --- a/content/en/docs/clients/restful-api/rank.md +++ b/content/en/docs/clients/restful-api/rank.md @@ -162,7 +162,7 @@ A random walk based PersonalRank algorithm should be likes this: ###### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/traversers/personalrank +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/personalrank ``` ###### Request Body @@ -318,7 +318,7 @@ In graph words: to go out from the starting point, get the probability of going ###### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/traversers/neighborrank +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/neighborrank ``` ###### Request Body diff --git a/content/en/docs/clients/restful-api/rebuild.md b/content/en/docs/clients/restful-api/rebuild.md index cc9fb64e9..b2dbaf6f3 100644 --- a/content/en/docs/clients/restful-api/rebuild.md +++ b/content/en/docs/clients/restful-api/rebuild.md @@ -11,7 +11,7 @@ weight: 6 ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/indexlabels/personByCity +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/indexlabels/personByCity ``` ##### Response Status @@ -29,14 +29,14 @@ PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/indexlabels/personByCity ``` Note: -> You can get the asynchronous job status by `GET http://localhost:8080/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 1). See More [AsyncJob RESTfull API](../task) +> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 1). 
See More [AsyncJob RESTful API](../task) #### 1.6.2 Rebulid all Indexs of VertexLabel ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/vertexlabels/person +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/vertexlabels/person ``` ##### Response Status @@ -55,14 +55,14 @@ PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/vertexlabels/person Note: -> You can get the asynchronous job status by `GET http://localhost:8080/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 2). See More [AsyncJob RESTfull API](../task) +> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 2). See More [AsyncJob RESTful API](../task) #### 1.6.3 Rebulid all Indexs of EdgeLabel ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/edgelabels/created +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/jobs/rebuild/edgelabels/created ``` ##### Response Status @@ -81,4 +81,4 @@ PUT http://localhost:8080/graphs/hugegraph/jobs/rebuild/edgelabels/created Note: -> You can get the asynchronous job status by `GET http://localhost:8080/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 3). See More [AsyncJob RESTfull API](../task) \ No newline at end of file +> You can get the asynchronous job status by `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/${task_id}` (the task_id here should be 3). 
See More [AsyncJob RESTful API](../task) \ No newline at end of file diff --git a/content/en/docs/clients/restful-api/schema.md b/content/en/docs/clients/restful-api/schema.md index d549fac70..6364cd3e4 100644 --- a/content/en/docs/clients/restful-api/schema.md +++ b/content/en/docs/clients/restful-api/schema.md @@ -11,9 +11,9 @@ HugeGraph provides a single interface to get all Schema information of a graph, ##### Method & Url ``` -GET http://localhost:8080/graphs/{graph_name}/schema +GET http://localhost:8080/graphspaces/{graphspace}/graphs/{graph_name}/schema -e.g: GET http://localhost:8080/graphs/hugegraph/schema +e.g: GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema ``` ##### Response Status diff --git a/content/en/docs/clients/restful-api/task.md b/content/en/docs/clients/restful-api/task.md index d1852f2cb..18f87d560 100644 --- a/content/en/docs/clients/restful-api/task.md +++ b/content/en/docs/clients/restful-api/task.md @@ -16,7 +16,7 @@ weight: 13 ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/tasks?status=success +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks?status=success ``` ##### Response Status @@ -50,7 +50,7 @@ GET http://localhost:8080/graphs/hugegraph/tasks?status=success ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/tasks/2 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2 ``` ##### Response Status @@ -82,7 +82,7 @@ GET http://localhost:8080/graphs/hugegraph/tasks/2 ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/tasks/2 +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2 ``` ##### Response Status @@ -110,7 +110,7 @@ If you already created an async task via [Gremlin API](/docs/clients/restful-api ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/tasks/2?action=cancel +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/2?action=cancel ``` > cancel it in 10s. 
if more than 10s, the task may already be finished, then can't be cancelled. diff --git a/content/en/docs/clients/restful-api/traverser.md b/content/en/docs/clients/restful-api/traverser.md index f6b21ee93..681166132 100644 --- a/content/en/docs/clients/restful-api/traverser.md +++ b/content/en/docs/clients/restful-api/traverser.md @@ -2414,7 +2414,7 @@ Queries a batch of "fusiform similar vertices" based on specified conditions. Wh ###### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/traversers/fusiformsimilarity +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/fusiformsimilarity ``` ###### Request Body @@ -2521,7 +2521,7 @@ Used to query vertices that have high similarity with a group of vertices. For e ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/vertices?ids="1:marko"&ids="2:lop" +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/vertices?ids="1:marko"&ids="2:lop" ``` ###### Response Status @@ -2600,7 +2600,7 @@ Obtain vertex shard information by specifying the shard size `split_size` (can b ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/vertices/shards?split_size=67108864 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/vertices/shards?split_size=67108864 ``` ###### Response Status @@ -2653,7 +2653,7 @@ Retrieve vertices in batches based on the specified shard information (refer to ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/vertices/scan?start=0&end=4294967295 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/vertices/scan?start=0&end=4294967295 ``` ###### Response Status @@ -2837,7 +2837,7 @@ GET http://localhost:8080/graphs/hugegraph/traversers/vertices/scan?start=0&end= ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/edges?ids="S1:josh>1>>S2:lop"&ids="S1:josh>1>>S2:ripple" +GET 
http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/edges?ids="S1:josh>1>>S2:lop"&ids="S1:josh>1>>S2:ripple" ``` ###### Response Status @@ -2892,7 +2892,7 @@ Retrieve shard information for edges by specifying the shard size (`split_size`) ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/edges/shards?split_size=4294967295 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/edges/shards?split_size=4294967295 ``` ###### Response Status @@ -2949,7 +2949,7 @@ Batch retrieve edges by specifying shard information (refer to section 3.2.22.2 ###### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/traversers/edges/scan?start=0&end=3221225469 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/traversers/edges/scan?start=0&end=3221225469 ``` ###### Response Status diff --git a/content/en/docs/clients/restful-api/variable.md b/content/en/docs/clients/restful-api/variable.md index 572bab32a..151498771 100644 --- a/content/en/docs/clients/restful-api/variable.md +++ b/content/en/docs/clients/restful-api/variable.md @@ -13,7 +13,7 @@ Variables can be used to store data about the entire graph. 
The data is accessed ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/variables/name +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables/name ``` ##### Request Body @@ -43,7 +43,7 @@ PUT http://localhost:8080/graphs/hugegraph/variables/name ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/variables +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables ``` ##### Response Status @@ -65,7 +65,7 @@ GET http://localhost:8080/graphs/hugegraph/variables ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/variables/name +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables/name ``` ##### Response Status @@ -87,7 +87,7 @@ GET http://localhost:8080/graphs/hugegraph/variables/name ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/variables/name +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/variables/name ``` ##### Response Status diff --git a/content/en/docs/clients/restful-api/vertex.md b/content/en/docs/clients/restful-api/vertex.md index 78e8f4c5f..d016cb146 100644 --- a/content/en/docs/clients/restful-api/vertex.md +++ b/content/en/docs/clients/restful-api/vertex.md @@ -45,7 +45,7 @@ schema.indexLabel("personByAge").onV("person").by("age").range().ifNotExist().cr ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/graph/vertices +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices ``` ##### Request Body @@ -85,7 +85,7 @@ POST http://localhost:8080/graphs/hugegraph/graph/vertices ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/graph/vertices/batch +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Request Body @@ -130,7 +130,7 @@ POST http://localhost:8080/graphs/hugegraph/graph/vertices/batch ##### Method & Url ``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=append +PUT 
http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko"?action=append ``` ##### Request Body @@ -220,13 +220,13 @@ Assuming the original vertex and properties are: Add vertices with the following command: ```shell -curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"josh","age":32,"city":"Beijing","weight":0.1,"hobby":["reading","football"]}},{"label":"software","properties":{"name":"lop","lang":"java","price":328}}]' http:///127.0.0.1:8080/graphs/hugegraph/graph/vertices/batch +curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"josh","age":32,"city":"Beijing","weight":0.1,"hobby":["reading","football"]}},{"label":"software","properties":{"name":"lop","lang":"java","price":328}}]' http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Method & Url ``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/batch +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Request Body @@ -324,7 +324,7 @@ The usage of other update strategies can be inferred in a similar manner and wil ##### Method & Url ``` -PUT http://127.0.0.1:8080/graphs/hugegraph/graph/vertices/"1:marko"?action=eliminate +PUT http://127.0.0.1:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko"?action=eliminate ``` ##### Request Body @@ -391,7 +391,7 @@ Property key-value pairs consist of the property name and value in JSON format. 
##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices?label=person&properties={"age":29}&limit=1 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices?label=person&properties={"age":29}&limit=1 ``` ##### Response Status @@ -423,13 +423,13 @@ GET http://localhost:8080/graphs/hugegraph/graph/vertices?label=person&propertie Add vertices with the following command: ```shell -curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"peter","age":29,"city":"Shanghai"}},{"label":"person","properties":{"name":"vadas","age":27,"city":"Hongkong"}}]' http://localhost:8080/graphs/hugegraph/graph/vertices/batch +curl -H "Content-Type: application/json" -d '[{"label":"person","properties":{"name":"peter","age":29,"city":"Shanghai"}},{"label":"person","properties":{"name":"vadas","age":27,"city":"Hongkong"}}]' http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/batch ``` ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices?page&limit=3 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices?page&limit=3 ``` ##### Response Status @@ -490,7 +490,7 @@ The returned `body` contains information about the page number of the next `page ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices?page=CIYxOnBldGVyAAAAAAAAAAM=&limit=3 +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices?page=CIYxOnBldGVyAAAAAAAAAAM=&limit=3 ``` ##### Response Status @@ -546,7 +546,7 @@ At this point, `"page": null` indicates that there are no more pages available. 
##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko" ``` ##### Response Status @@ -580,7 +580,7 @@ GET http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko" +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko" ``` ##### Response Status @@ -596,7 +596,7 @@ When deleting a vertex by specifying both the Label parameter and the ID, it gen ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/graph/vertices/"1:marko"?label=person +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/graph/vertices/"1:marko"?label=person ``` ##### Response Status diff --git a/content/en/docs/clients/restful-api/vertexlabel.md b/content/en/docs/clients/restful-api/vertexlabel.md index 7a3c8c1fe..241497098 100644 --- a/content/en/docs/clients/restful-api/vertexlabel.md +++ b/content/en/docs/clients/restful-api/vertexlabel.md @@ -25,7 +25,7 @@ Params Description: ##### Method & Url ``` -POST http://localhost:8080/graphs/hugegraph/schema/vertexlabels +POST http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels ``` ##### Request Body @@ -124,7 +124,7 @@ Additionally, if the vertex has a property called "createdTime" and you want to ##### Method & Url ``` -PUT http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person?action=append +PUT http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels/person?action=append ``` ##### Request Body @@ -180,7 +180,7 @@ PUT http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person?action=app ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels ``` ##### Response Status @@ -245,7 +245,7 
@@ GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels ##### Method & Url ``` -GET http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person +GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels/person ``` ##### Response Status @@ -288,7 +288,7 @@ Deleting a VertexLabel will result in the removal of corresponding vertices and ##### Method & Url ``` -DELETE http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person +DELETE http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/schema/vertexlabels/person ``` ##### Response Status @@ -307,4 +307,4 @@ DELETE http://localhost:8080/graphs/hugegraph/schema/vertexlabels/person Note: -> You can use `GET http://localhost:8080/graphs/hugegraph/tasks/1` (where "1" is the task_id) to query the execution status of the asynchronous task. For more information, refer to the [Asynchronous Task RESTful API](../task). +> You can use `GET http://localhost:8080/graphspaces/DEFAULT/graphs/hugegraph/tasks/1` (where "1" is the task_id) to query the execution status of the asynchronous task. For more information, refer to the [Asynchronous Task RESTful API](../task). diff --git a/content/en/docs/quickstart/hugegraph/hugegraph-server.md b/content/en/docs/quickstart/hugegraph/hugegraph-server.md index d4ecc1d60..226238ae2 100644 --- a/content/en/docs/quickstart/hugegraph/hugegraph-server.md +++ b/content/en/docs/quickstart/hugegraph/hugegraph-server.md @@ -40,13 +40,13 @@ There are four ways to deploy HugeGraph-Server components: #### 3.1 Use Docker container (Convenient for Test/Dev) -You can refer to [Docker deployment guide](https://hub.docker.com/r/hugegraph/hugegraph). +You can refer to the [Docker deployment guide](https://github.com/apache/incubator-hugegraph/blob/master/hugegraph-server/hugegraph-dist/docker/README.md). 
-We can use `docker run -itd --name=graph -e PASSWORD=xxx -p 8080:8080 hugegraph/hugegraph:1.5.0` to quickly start an inner `HugeGraph server` with `RocksDB` in background. +We can use `docker run -itd --name=server -p 8080:8080 -e PASSWORD=xxx hugegraph/hugegraph:1.5.0` to quickly start a `HugeGraph Server` with a built-in `RocksDB` backend. Optional: 1. use `docker exec -it graph bash` to enter the container to do some operations. -2. use `docker run -itd --name=graph -p 8080:8080 -e PRELOAD="true" hugegraph/hugegraph:1.5.0` to start with a **built-in** example graph. We can use `RESTful API` to verify the result. The detailed step can refer to [5.1.9](#519-create-an-example-graph-when-startup) +2. use `docker run -itd --name=graph -p 8080:8080 -e PRELOAD="true" hugegraph/hugegraph:1.5.0` to start with a **built-in** example graph. We can use `RESTful API` to verify the result. The detailed steps can be found in [5.1.8](#518-create-an-example-graph-when-startup) 3. use `-e PASSWORD=xxx` to enable auth mode and set the password for admin. You can find more details from [Config Authentication](/docs/config/config-authentication#use-docker-to-enable-authentication-mode) If you use docker desktop, you can set the option like: @@ -74,8 +74,8 @@ services: > Note: > > 1. The docker image of the hugegraph is a convenient release to start it quickly, but not **official distribution** artifacts. You can find more details from [ASF Release Distribution Policy](https://infra.apache.org/release-distribution.html#dockerhub). -> -> 2. Recommend to use `release tag`(like `1.5.0`/`1.5.0`) for the stable version. Use `latest` tag to experience the newest functions in development. +> +> 2. We recommend using a `release tag` (like `1.5.0`/`1.x.0`) for the stable version. Use the `latest` tag to experience the newest functions in development. 
#### 3.2 Download the binary tar tarball @@ -91,9 +91,10 @@ curl https://downloads.apache.org/incubator/hugegraph/{version}/apache-hugegraph ``` #### 3.3 Source code compilation -Please ensure that the wget command is installed before compiling the source code -We could get HugeGraph **source code** in 2 ways: (So as the other HugeGraph repos/modules) +Please ensure that the wget/curl commands are installed before compiling the source code. + +Download HugeGraph **source code** in either of the following 2 ways (the same applies to the other HugeGraph repos/modules): - download the stable/release version from the ASF site - clone the unstable/latest version by GitBox(ASF) or GitHub @@ -193,7 +194,7 @@ If you need to access HugeGraphServer externally, please modify the `restserver. Since the configuration (hugegraph.properties) and startup steps required by various backends are slightly different, the following will introduce the configuration and startup of each backend one by one. -Follow the [Server Authentication Configuration](/docs/config/config-authentication/) before you start Server later. +**Note:** Configure [Server Authentication](/docs/config/config-authentication/) before starting HugeGraphServer if you need Auth mode (especially for production or public network environments). 
##### 5.1.1 Distributed Storage (HStore) @@ -215,10 +216,17 @@ task.scheduler_type=distributed pd.peers=127.0.0.1:8686,127.0.0.1:8687,127.0.0.1:8688 ``` +Then enable PD discovery in `rest-server.properties` (required for every HugeGraph-Server node): + +```properties +usePD=true +``` + If configuring multiple HugeGraph-Server nodes, you need to modify the `rest-server.properties` configuration file for each node, for example: Node 1 (Master node): ```properties +usePD=true restserver.url=http://127.0.0.1:8081 gremlinserver.url=http://127.0.0.1:8181 @@ -231,6 +239,7 @@ server.role=master Node 2 (Worker node): ```properties +usePD=true restserver.url=http://127.0.0.1:8082 gremlinserver.url=http://127.0.0.1:8182 From b55fe2baa098b9a6074c1a2d43b60d653cae0619 Mon Sep 17 00:00:00 2001 From: Peng Junzhi <78788603+Pengzna@users.noreply.github.com> Date: Fri, 28 Nov 2025 22:07:36 +0800 Subject: [PATCH 15/19] feat: 1.7.0 download-links (#434) --- content/cn/docs/download/download.md | 31 +++++++++++++++++++++----- content/en/docs/download/download.md | 33 +++++++++++++++++++++++----- 2 files changed, 53 insertions(+), 11 deletions(-) diff --git a/content/cn/docs/download/download.md b/content/cn/docs/download/download.md index 8127e31f3..48977f8a5 100644 --- a/content/cn/docs/download/download.md +++ b/content/cn/docs/download/download.md @@ -12,18 +12,18 @@ weight: 2 > 注: HugeGraph 所有组件版本号已保持一致, `client/loader/hubble/common` 等 maven 仓库版本号同理, 依赖引用可参考 [maven 示例](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies) -### 最新版本 1.5.0 +### 最新版本 1.7.0 > 注: 从版本 `1.5.0` 开始,需要 Java11 运行时环境 -- Release Date: 2024-12-10 -- [Release Notes](/docs/changelog/hugegraph-1.5.0-release-notes/) +- Release Date: 2025-11-28 +- [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes/) #### 二进制包 | Server | Toolchain | 
|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] | +| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.sha512)] | 
[[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.sha512)] | #### 源码包 @@ -31,17 +31,36 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). | Server | Toolchain | AI | Computer | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------| -| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] | +| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.asc)] 
[[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.sha512)] | --- ### 归档版本 > 注: -> +> > 1. 请大家尽早迁移到最新 Release 版本上, 社区将不再维护 `1.0.0` 前的旧版本 (非 ASF 版本) > 2. 
`1.3.0` 是最后一个兼容 Java8 的主版本, 请尽早使用/迁移运行时为 Java11 (低版本 Java 有潜在更多的 SEC 风险和性能影响) +#### 1.5.0 + +- Release Date: 2024-12-10 +- [Release Notes](/docs/changelog/hugegraph-1.5.0-release-notes/) + +##### 二进制包 + +| Server | Toolchain | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] | + +##### 源码包 + +Please refer to [build from source](/docs/quickstart/hugegraph-server/). 
+ +| Server | Toolchain | AI | Computer | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | 
[[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] | + #### 1.3.0 - Release Date: 2024-04-01 diff --git a/content/en/docs/download/download.md b/content/en/docs/download/download.md index 1a0960edd..d5205c570 100644 --- a/content/en/docs/download/download.md +++ b/content/en/docs/download/download.md @@ -13,18 +13,18 @@ weight: 2 > Note: The version numbers of all components of HugeGraph have been kept consistent, and the version numbers of Maven repositories such as `client/loader/hubble/common` are the same. You can refer to these for dependency references [maven example](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies). -### Latest Version 1.5.0 +### Latest Version 1.7.0 > Note: Starting from version `1.5.0`, a Java11 runtime environment is required. 
-- Release Date: 2024-12-10 -- [Release Notes](/docs/changelog/hugegraph-1.5.0-release-notes/) +- Release Date: 2025-11-28 +- [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes/) #### Binary Packages | Server | Toolchain | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] | +| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.asc)] 
[[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0.tar.gz.sha512)] | #### Source Packages @@ -32,7 +32,7 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). | Server | Toolchain | AI | Computer | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] | +| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz?action=download)] 
[[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-toolchain-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-ai-incubating-1.7.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.7.0/apache-hugegraph-computer-incubating-1.7.0-src.tar.gz.sha512)] | --- @@ -40,6 +40,23 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). > Note: `1.3.0` is the last major version compatible with Java8, please switch to or migrate to Java11 as soon as possible (lower versions of Java have potentially more SEC risks and performance impacts). 
+#### 1.5.0 + +- Release Date: 2024-12-10 +- [Release Notes](/docs/changelog/hugegraph-1.5.0-release-notes/) + +##### Binary Packages + +| Server | Toolchain | +|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0.tar.gz.sha512)] | [[Binary](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0.tar.gz.sha512)] | + +##### Source Packages + +| Server | Toolchain | AI | Computer | 
+|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-incubating-1.5.0-src.tar.gz.sha512)] | 
[[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-toolchain-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-ai-incubating-1.5.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.5.0/apache-hugegraph-computer-incubating-1.5.0-src.tar.gz.sha512)] | + #### 1.3.0 - Release Date: 2024-04-01 @@ -90,3 +107,9 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). 
| Server | Toolchain | Computer | Common | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | 
[[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] | + +--- + +
Old Versions (Non-ASF Versions) +Due to ASF rules, non-ASF distribution packages cannot be hosted directly on this page. For download instructions for old versions before 1.0.0 (non-ASF versions), please jump to https://github.com/apache/incubator-hugegraph-doc/wiki/Apache-HugeGraph-(Incubating)-Old-Versions-Download +
From d533820f9889634bf5fac8c311cbdb119f5ba642 Mon Sep 17 00:00:00 2001 From: Duoduo Wang Date: Mon, 1 Dec 2025 15:38:17 +0800 Subject: [PATCH 16/19] docs: refactor docs of loader & client for new version(1.7.0) (#415) * fixed mvn version to 1.7.0 added graphspace part for docs of client changed client examples to NEWER version fixed parameters in loader docs --- content/cn/docs/clients/hugegraph-client.md | 39 +++++++++++- .../quickstart/client/hugegraph-client.md | 15 +++-- .../quickstart/toolchain/hugegraph-loader.md | 39 +++++++++--- content/en/docs/clients/hugegraph-client.md | 39 +++++++++++- .../quickstart/client/hugegraph-client.md | 14 +++-- .../quickstart/toolchain/hugegraph-loader.md | 61 +++++++++++++------ 6 files changed, 168 insertions(+), 39 deletions(-) diff --git a/content/cn/docs/clients/hugegraph-client.md b/content/cn/docs/clients/hugegraph-client.md index 1a9e1daa1..4b51ed729 100644 --- a/content/cn/docs/clients/hugegraph-client.md +++ b/content/cn/docs/clients/hugegraph-client.md @@ -13,12 +13,13 @@ weight: 2 HugeGraph-Client 是操作 graph 的总入口,用户必须先创建出 HugeGraph-Client 对象,与 HugeGraph-Server 建立连接(伪连接)后,才能获取到 schema、graph 以及 gremlin 的操作入口对象。 -目前 HugeGraph-Client 只允许连接服务端已存在的图,无法自定义图进行创建。其创建方法如下: +目前 HugeGraph-Client 只允许连接服务端已存在的图,无法自定义图进行创建。1.7.0 版本后,client 支持 graphSpace 设置,默认为DEFAULT。其创建方法如下: ```java // HugeGraphServer 地址:"http://localhost:8080" // 图的名称:"hugegraph" HugeClient hugeClient = HugeClient.builder("http://localhost:8080", "hugegraph") + //.builder("http://localhost:8080", "graphSpaceName", "hugegraph") .configTimeout(20) // 默认 20s 超时 .configUser("**", "**") // 默认未开启用户权限 .build(); @@ -455,6 +456,40 @@ Edge knows1 = marko.addEdge("knows", vadas, "city", "Beijing"); **注意:当 frequency 为 multiple 时必须要设置 sortKeys 对应属性类型的值。** -### 4 简单示例 +### 4 图管理 +client支持一个物理部署中多个 GraphSpace,每个 GraphSpace 下可以含多个图(graph)。 +- 兼容:不指定 GraphSpace 时,默认使用 "DEFAULT" 空间 + +#### 4.1 创建GraphSpace + +```java +GraphSpaceManager spaceManager = hugeClient.graphSpace(); + 
+// 定义 GraphSpace 配置 +GraphSpace graphSpace = new GraphSpace(); +graphSpace.setName("myGraphSpace"); +graphSpace.setDescription("Business data graph space"); +graphSpace.setMaxGraphNumber(10); // 最大图数量 +graphSpace.setMaxRoleNumber(100); // 最大角色数量 + +// 创建 GraphSpace +spaceManager.createGraphSpace(graphSpace); +``` +#### 4.2 GraphSpace 接口汇总 + +| 类别 | 接口 | 描述 | +|------|------|------| +| Manager - 查询 | listGraphSpace() | 获取所有 GraphSpace 列表 | +| | getGraphSpace(String name) | 获取指定 GraphSpace | +| Manager - 创建/更新 | createGraphSpace(GraphSpace) | 创建 GraphSpace | +| | updateGraphSpace(String, GraphSpace) | 更新配置 | +| Manager - 删除 | removeGraphSpace(String) | 删除指定 GraphSpace | +| GraphSpace - 属性 | getName() / getDescription() | 获取名称/描述 | +| | getGraphNumber() | 获取图数量 | +| GraphSpace - 配置 | setDescription(String) | 设置描述 | +| | setMaxGraphNumber(int) | 设置最大图数量 | + + +### 5 简单示例 简单示例见[HugeGraph-Client](/cn/docs/quickstart/client/hugegraph-client) diff --git a/content/cn/docs/quickstart/client/hugegraph-client.md b/content/cn/docs/quickstart/client/hugegraph-client.md index 9d2165313..9322aabfa 100644 --- a/content/cn/docs/quickstart/client/hugegraph-client.md +++ b/content/cn/docs/quickstart/client/hugegraph-client.md @@ -48,7 +48,7 @@ weight: 1 org.apache.hugegraph hugegraph-client - 1.5.0 + 1.7.0 ``` @@ -79,7 +79,10 @@ public class SingleExample { public static void main(String[] args) throws IOException { // If connect failed will throw a exception. HugeClient hugeClient = HugeClient.builder("http://localhost:8080", + "DEFAULT", "hugegraph") + .configUser("username", "password") + // 这是示例,生产环境需要使用安全的凭证 .build(); SchemaManager schema = hugeClient.schema(); @@ -224,7 +227,10 @@ public class BatchExample { public static void main(String[] args) { // If connect failed will throw a exception. 
HugeClient hugeClient = HugeClient.builder("http://localhost:8080", - "hugegraph") + "DEFAULT", + "hugegraph") + .configUser("username", "password") + // 这是示例,生产环境需要使用安全的凭证 .build(); SchemaManager schema = hugeClient.schema(); @@ -348,12 +354,11 @@ public class BatchExample { } ``` -### 4.4 运行 Example +#### 4.4 运行 Example 运行 Example 之前需要启动 Server, 启动过程见[HugeGraph-Server Quick Start](/cn/docs/quickstart/hugegraph-server) -### 4.5 详细 API 说明 +#### 4.5 详细 API 说明 示例说明见[HugeGraph-Client 基本 API 介绍](/cn/docs/clients/hugegraph-client) - diff --git a/content/cn/docs/quickstart/toolchain/hugegraph-loader.md index 0ea2b729c..9b088e4cb 100644 --- a/content/cn/docs/quickstart/toolchain/hugegraph-loader.md +++ b/content/cn/docs/quickstart/toolchain/hugegraph-loader.md @@ -605,7 +605,7 @@ bin/mapping-convert.sh struct.json ##### 3.3.2 输入源 -输入源目前分为四类:FILE、HDFS、JDBC、KAFKA,由`type`节点区分,我们称为本地文件输入源、HDFS 输入源、JDBC 输入源和 KAFKA 输入源,下面分别介绍。 +输入源目前分为五类:FILE、HDFS、JDBC、KAFKA 和 GRAPH,由`type`节点区分,我们称为本地文件输入源、HDFS 输入源、JDBC 输入源、KAFKA 输入源和图数据源,下面分别介绍。 ###### 3.3.2.1 本地文件输入源 @@ -709,6 +709,22 @@ schema: 必填 - skipped_line:想跳过的行,复合结构,目前只能配置要跳过的行的正则表达式,用子节点 regex 描述,默认不跳过任何行,选填; - early_stop:某次从 Kafka broker 拉取的记录为空,停止任务,默认为 false,仅用于调试,选填; +###### 3.3.2.5 GRAPH 输入源 + +- type:输入源类型,必须填 `graph` 或 `GRAPH`,必填; +- graphspace:源图空间名称,默认为 `DEFAULT`; +- graph:源图名称,必填; +- username:HugeGraph 用户名; +- password:HugeGraph 密码; +- selected_vertices:要同步的顶点筛选规则; +- ignored_vertices:要忽略的顶点筛选规则; +- selected_edges:要同步的边筛选规则; +- ignored_edges:要忽略的边筛选规则; +- pd-peers:HugeGraph-PD 节点地址; +- meta-endpoints:源集群 Meta 服务端点; +- cluster:源集群名称; +- batch_size:批量读取源图数据的批次大小,默认为 500; + ##### 3.3.3 顶点和边映射 顶点和边映射的节点(JSON 文件中的一个 key)有很多相同的部分,下面先介绍相同部分,再分别介绍`顶点映射`和`边映射`的特有节点。 @@ -794,20 +810,29 @@ schema: 必填 | 参数 | 默认值 | 是否必传 | 描述信息 | |---------------------------|-----------|------|-------------------------------------------------------------------| | `-f` 或 `--file` | | Y | 配置脚本的路径 | 
-| `-g` 或 `--graph` | | Y | 图数据库空间 | -| `-s` 或 `--schema` | | Y | schema 文件路径 | | -| `-h` 或 `--host` | localhost | | HugeGraphServer 的地址 | +| `-g` 或 `--graph` | | Y | 图名称 | +| `-gs` 或 `--graphspace` | DEFAULT | | 图空间 | +| `-s` 或 `--schema` | | Y | schema 文件路径 | +| `-h` 或 `--host` 或 `-i` | localhost | | HugeGraphServer 的地址 | | `-p` 或 `--port` | 8080 | | HugeGraphServer 的端口号 | | `--username` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 username | +| `--password` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 password | +| `--create-graph` | false | | 是否在图不存在时自动创建 | | `--token` | null | | 当 HugeGraphServer 开启了权限认证时,当前图的 token | | `--protocol` | http | | 向服务端发请求的协议,可选 http 或 https | +| `--pd-peers` | | | PD 服务节点地址 | +| `--pd-token` | | | 访问 PD 服务的 token | +| `--meta-endpoints` | | | 元信息存储服务地址 | +| `--direct` | false | | 是否直连 HugeGraph-Store | +| `--route-type` | NODE_PORT | | 路由选择方式(可选值:NODE_PORT / DDS / BOTH) | +| `--cluster` | hg | | 集群名 | | `--trust-store-file` | | | 请求协议为 https 时,客户端的证书文件路径 | | `--trust-store-password` | | | 请求协议为 https 时,客户端证书密码 | | `--clear-all-data` | false | | 导入数据前是否清除服务端的原有数据 | | `--clear-timeout` | 240 | | 导入数据前清除服务端的原有数据的超时时间 | -| `--incremental-mode` | false | | 是否使用断点续导模式,仅输入源为 FILE 和 HDFS 支持该模式,启用该模式能从上一次导入停止的地方开始导 | +| `--incremental-mode` | false | | 是否使用断点续导模式,仅输入源为 FILE 和 HDFS 支持该模式,启用该模式能从上一次导入停止的地方开始导入 | | `--failure-mode` | false | | 失败模式为 true 时,会导入之前失败了的数据,一般来说失败数据文件需要在人工更正编辑好后,再次进行导入 | -| `--batch-insert-threads` | CPUs | | 批量插入线程池大小 (CPUs 是当前 OS 可用可用**逻辑核**个数) | +| `--batch-insert-threads` | CPUs | | 批量插入线程池大小 (CPUs 是当前 OS 可用**逻辑核**个数) | | `--single-insert-threads` | 8 | | 单条插入线程池的大小 | | `--max-conn` | 4 * CPUs | | HugeClient 与 HugeGraphServer 的最大 HTTP 连接数,**调整线程**的时候建议同时调整此项 | | `--max-conn-per-route` | 2 * CPUs | | HugeClient 与 HugeGraphServer 每个路由的最大 HTTP 连接数,**调整线程**的时候建议同时调整此项 | @@ -821,7 +846,7 @@ schema: 必填 | `--check-vertex` | false | | 插入边时是否检查边所连接的顶点是否存在 | | `--print-progress` | true | | 是否在控制台实时打印导入条数 | | `--dry-run` | 
false | | 打开该模式,只解析不导入,通常用于测试 | -| `--help` | false | | 打印帮助信息 | +| `--help` | false | | 打印帮助信息 | ##### 3.4.2 断点续导模式 diff --git a/content/en/docs/clients/hugegraph-client.md index 5ae2db27e..746418b69 100644 --- a/content/en/docs/clients/hugegraph-client.md +++ b/content/en/docs/clients/hugegraph-client.md @@ -12,12 +12,13 @@ The `gremlin(groovy)` written by the user in `HugeGraph-Studio` can refer to the HugeGraph-Client is the general entry for operating graph. Users must first create a HugeGraph-Client object and establish a connection (pseudo connection) with HugeGraph-Server before they can obtain the operation entry objects of schema, graph and gremlin. -Currently, HugeGraph-Client only allows connections to existing graphs on the server, and cannot create custom graphs. Its creation method is as follows: +Currently, HugeGraph-Client only allows connections to existing graphs on the server, and cannot create custom graphs. Since version 1.7.0, the client supports setting a graphSpace; the default graphSpace is DEFAULT. Its creation method is as follows: ```java // HugeGraphServer address: "http://localhost:8080" // Graph Name: "hugegraph" HugeClient hugeClient = HugeClient.builder("http://localhost:8080", "hugegraph") + //.builder("http://localhost:8080", "graphSpaceName", "hugegraph") .configTimeout(20) // 20s timeout .configUser("**", "**") // enable auth .build(); @@ -444,6 +445,40 @@ Edge knows1 = marko.addEdge("knows", vadas, "city", "Beijing"); **Note: When frequency is multiple, the value of the property type corresponding to sortKeys must be set.** -### 4 Examples +### 4 GraphSpace +The client supports multiple GraphSpaces in one physical deployment, and each GraphSpace can contain multiple graphs. +- Compatibility: When no GraphSpace is specified, the "DEFAULT" space is used by default. 
+ +#### 4.1 Create GraphSpace + +```java +GraphSpaceManager spaceManager = hugeClient.graphSpace(); + +// Define GraphSpace configuration +GraphSpace graphSpace = new GraphSpace(); +graphSpace.setName("myGraphSpace"); +graphSpace.setDescription("Business data graph space"); +graphSpace.setMaxGraphNumber(10); // Maximum number of graphs +graphSpace.setMaxRoleNumber(100); // Maximum number of roles + +// Create GraphSpace +spaceManager.createGraphSpace(graphSpace); +``` + +#### 4.2 GraphSpace Interface Summary + +| Category | Interface | Description | +|----------|-----------|-------------| +| Manager - Query | listGraphSpace() | Get the list of all GraphSpaces | +| | getGraphSpace(String name) | Get the specified GraphSpace | +| Manager - Create/Update | createGraphSpace(GraphSpace) | Create a GraphSpace | +| | updateGraphSpace(String, GraphSpace) | Update configuration | +| Manager - Delete | removeGraphSpace(String) | Delete the specified GraphSpace | +| GraphSpace - Properties | getName() / getDescription() | Get name / description | +| | getGraphNumber() | Get the number of graphs | +| GraphSpace - Configuration | setDescription(String) | Set description | +| | setMaxGraphNumber(int) | Set the maximum number of graphs | + +### 5 Simple Example Simple examples can reference [HugeGraph-Client](/docs/quickstart/client/hugegraph-client) diff --git a/content/en/docs/quickstart/client/hugegraph-client.md b/content/en/docs/quickstart/client/hugegraph-client.md index f932beddf..91ac7865e 100644 --- a/content/en/docs/quickstart/client/hugegraph-client.md +++ b/content/en/docs/quickstart/client/hugegraph-client.md @@ -44,7 +44,7 @@ Using IDEA or Eclipse to create the project: org.apache.hugegraph hugegraph-client - 1.5.0 + 1.7.0 ``` @@ -75,7 +75,10 @@ public class SingleExample { public static void main(String[] args) throws IOException { // If connect failed will throw a exception. 
HugeClient hugeClient = HugeClient.builder("http://localhost:8080", + "DEFAULT", "hugegraph") + .configUser("username", "password") + // This is an example. In a production environment, secure credentials should be used. .build(); SchemaManager schema = hugeClient.schema(); @@ -218,9 +221,11 @@ import org.apache.hugegraph.structure.graph.Vertex; public class BatchExample { public static void main(String[] args) { - // If connect failed will throw a exception. HugeClient hugeClient = HugeClient.builder("http://localhost:8080", + "DEFAULT", "hugegraph") + .configUser("username", "password") + // This is an example. In a production environment, secure credentials should be used. .build(); SchemaManager schema = hugeClient.schema(); @@ -344,11 +349,10 @@ public class BatchExample { } ``` -### 4.4 Run The Example +#### 4.4 Run The Example Before running Example, you need to start the Server. For the startup process, see[HugeGraph-Server Quick Start](/docs/quickstart/hugegraph/hugegraph-server). -### 4.5 More Information About Client-API +#### 4.5 More Information About Client-API See[Introduce basic API of HugeGraph-Client](/docs/clients/hugegraph-client). - diff --git a/content/en/docs/quickstart/toolchain/hugegraph-loader.md b/content/en/docs/quickstart/toolchain/hugegraph-loader.md index a8ec5ec4d..6d14d05ae 100644 --- a/content/en/docs/quickstart/toolchain/hugegraph-loader.md +++ b/content/en/docs/quickstart/toolchain/hugegraph-loader.md @@ -592,7 +592,7 @@ A struct-v2.json will be generated in the same directory as struct.json. ##### 3.3.2 Input Source -Input sources are currently divided into four categories: FILE, HDFS, JDBC and KAFKA, which are distinguished by the `type` node. We call them local file input sources, HDFS input sources, JDBC input sources, and KAFKA input sources, which are described below. +Input sources are currently divided into five categories: FILE, HDFS, JDBC, KAFKA and GRAPH, which are distinguished by the `type` node. 
We call them local file input sources, HDFS input sources, JDBC input sources, KAFKA input sources and GRAPH input sources, which are described below. ###### 3.3.2.1 Local file input source @@ -696,6 +696,22 @@ schema: required - skipped_line: the line you want to skip, composite structure, currently can only configure the regular expression of the line to be skipped, described by the child node regex, the default is not to skip any line, optional; - early_stop: the record pulled from Kafka broker at a certain time is empty, stop the task, default is false, only for debugging, optional; +###### 3.3.2.5 GRAPH input source + +- type: Data source type; must be filled in as `graph` or `GRAPH` (required); +- graphspace: Source graphSpace name; default is `DEFAULT`; +- graph: Source graph name (required); +- username: HugeGraph username; +- password: HugeGraph password; +- selected_vertices: Filtering rules for vertices to be synchronized; +- ignored_vertices: Filtering rules for vertices to be ignored; +- selected_edges: Filtering rules for edges to be synchronized; +- ignored_edges: Filtering rules for edges to be ignored; +- pd-peers: HugeGraph-PD node addresses; +- meta-endpoints: Meta service endpoints of the source cluster; +- cluster: Source cluster name; +- batch_size: Batch size for reading data from the source graph; default is 500; + ##### 3.3.3 Vertex and Edge Mapping The nodes of vertex and edge mapping (a key in the JSON file) have a lot of the same parts. The same parts are introduced first, and then the unique nodes of `vertex map` and `edge map` are introduced respectively. 
@@ -780,35 +796,44 @@ The import process is controlled by commands submitted by the user, and the user | Parameter | Default value | Required or not | Description | |---------------------------|---------------|-----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `-f` or `--file` | | Y | path to configure script | -| `-g` or `--graph` | | Y | graph space name | -| `-s` or `--schema` | | Y | schema file path | -| `-h` or `--host` | localhost | | address of HugeGraphServer | -| `-p` or `--port` | 8080 | | port number of HugeGraphServer | +| `-f` or `--file` | | Y | Path to configure script | +| `-g` or `--graph` | | Y | Graph name | +| `-gs` or `--graphspace` | DEFAULT | | Graph space name | +| `-s` or `--schema` | | Y | Schema file path | +| `-h` or `--host` or `-i` | localhost | | Address of HugeGraphServer | +| `-p` or `--port` | 8080 | | Port number of HugeGraphServer | | `--username` | null | | When HugeGraphServer enables permission authentication, the username of the current graph | +| `--password` | null | | When HugeGraphServer enables permission authentication, the password of the current graph | +| `--create-graph` | false | | Whether to automatically create the graph if it does not exist | | `--token` | null | | When HugeGraphServer has enabled authorization authentication, the token of the current graph | | `--protocol` | http | | Protocol for sending requests to the server, optional http or https | +| `--pd-peers` | | | PD service node addresses | +| `--pd-token` | | | Token for accessing PD service | +| `--meta-endpoints` | | | Meta information storage service addresses | +| `--direct` | false | | Whether to directly connect to HugeGraph-Store | +| `--route-type` | NODE_PORT | | Route selection method (optional values: NODE_PORT / DDS / BOTH) | +| `--cluster` | hg | | Cluster name | | 
`--trust-store-file` | | | When the request protocol is https, the client's certificate file path | | `--trust-store-password` | | | When the request protocol is https, the client certificate password | | `--clear-all-data` | false | | Whether to clear the original data on the server before importing data | | `--clear-timeout` | 240 | | Timeout for clearing the original data on the server before importing data | -| `--incremental-mode` | false | | Whether to use the breakpoint resume mode, only the input source is FILE and HDFS support this mode, enabling this mode can start the import from the place where the last import stopped | -| `--failure-mode` | false | | When the failure mode is true, the data that failed before will be imported. Generally speaking, the failed data file needs to be manually corrected and edited, and then imported again | +| `--incremental-mode` | false | | Whether to use the breakpoint resume mode; only input sources FILE and HDFS support this mode. Enabling this mode allows starting the import from where the last import stopped | +| `--failure-mode` | false | | When failure mode is true, previously failed data will be imported. 
Generally, the failed data file needs to be manually corrected and edited before re-importing | | `--batch-insert-threads` | CPUs | | Batch insert thread pool size (CPUs is the number of **logical cores** available to the current OS) | | `--single-insert-threads` | 8 | | Size of single insert thread pool | -| `--max-conn` | 4 * CPUs | | The maximum number of HTTP connections between HugeClient and HugeGraphServer, it is recommended to adjust this when **adjusting threads** | -| `--max-conn-per-route` | 2 * CPUs | | The maximum number of HTTP connections for each route between HugeClient and HugeGraphServer, it is recommended to adjust this item at the same time when **adjusting the thread** | +| `--max-conn` | 4 * CPUs | | The maximum number of HTTP connections between HugeClient and HugeGraphServer; it is recommended to adjust this when **adjusting threads** | +| `--max-conn-per-route` | 2 * CPUs | | The maximum number of HTTP connections for each route between HugeClient and HugeGraphServer; it is recommended to adjust this item when **adjusting threads** | | `--batch-size` | 500 | | The number of data items in each batch when importing data | -| `--max-parse-errors` | 1 | | The maximum number of lines of data parsing errors allowed, and the program exits when this value is reached | -| `--max-insert-errors` | 500 | | The maximum number of rows of data insertion errors allowed, and the program exits when this value is reached | -| `--timeout` | 60 | | Timeout (seconds) for inserting results to return | +| `--max-parse-errors` | 1 | | The maximum number of data parsing errors allowed (per line); the program exits when this value is reached | +| `--max-insert-errors` | 500 | | The maximum number of data insertion errors allowed (per row); the program exits when this value is reached | +| `--timeout` | 60 | | Timeout (seconds) for insert result return | | `--shutdown-timeout` | 10 | | Waiting time for multithreading to stop (seconds) | | `--retry-times` | 0 | | 
Number of retries when a specific exception occurs | -| `--retry-interval` | 10 | | interval before retry (seconds) | -| `--check-vertex` | false | | Whether to check whether the vertex connected by the edge exists when inserting the edge | -| `--print-progress` | true | | Whether to print the number of imported items in the console in real time | -| `--dry-run` | false | | Turn on this mode, only parsing but not importing, usually used for testing | -| `--help` | false | | print help information | +| `--retry-interval` | 10 | | Interval before retry (seconds) | +| `--check-vertex` | false | | Whether to check if the vertices connected by the edge exist when inserting the edge | +| `--print-progress` | true | | Whether to print the number of imported items in real time on the console | +| `--dry-run` | false | | Enable this mode to only parse data without importing; usually used for testing | +| `--help` | false | | Print help information | ##### 3.4.2 Breakpoint Continuation Mode From 70742acef7c499d1f56f07c066e731985a493be2 Mon Sep 17 00:00:00 2001 From: chohee Date: Tue, 2 Dec 2025 15:35:31 +0800 Subject: [PATCH 17/19] chore: remove outdated ConfigAuthenticator --- content/cn/docs/config/config-guide.md | 7 +------ content/cn/docs/config/config-option.md | 4 +--- content/en/docs/config/config-guide.md | 7 +------ content/en/docs/config/config-option.md | 4 +--- 4 files changed, 4 insertions(+), 18 deletions(-) diff --git a/content/cn/docs/config/config-guide.md b/content/cn/docs/config/config-guide.md index efb92c2c1..f791fee37 100644 --- a/content/cn/docs/config/config-guide.md +++ b/content/cn/docs/config/config-guide.md @@ -173,8 +173,7 @@ arthas.ip=127.0.0.1 arthas.disabled_commands=jad # authentication configs -# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or -# 'org.apache.hugegraph.auth.ConfigAuthenticator' +# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation #auth.authenticator= # for StandardAuthenticator 
mode @@ -182,10 +181,6 @@ arthas.disabled_commands=jad # auth client config #auth.remote_url=127.0.0.1:8899,127.0.0.1:8898,127.0.0.1:8897 -# for ConfigAuthenticator mode -#auth.admin_token= -#auth.user_tokens=[] - # TODO: Deprecated & removed later (useless from version 1.5.0) # rpc server configs for multi graph-servers or raft-servers #rpc.server_host=127.0.0.1 diff --git a/content/cn/docs/config/config-option.md b/content/cn/docs/config/config-option.md index 274dec6de..0bf56af8b 100644 --- a/content/cn/docs/config/config-option.md +++ b/content/cn/docs/config/config-option.md @@ -41,10 +41,8 @@ weight: 2 | batch.max_vertices_per_batch | 500 | The maximum number of vertices submitted per batch. | | batch.max_write_ratio | 50 | The maximum thread ratio for batch writing, only take effect if the batch.max_write_threads is 0. | | batch.max_write_threads | 0 | The maximum threads for batch writing, if the value is 0, the actual value will be set to batch.max_write_ratio * restserver.max_worker_threads. | -| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or org.apache.hugegraph.auth.ConfigAuthenticator. | -| auth.admin_token | 162f7848-0b6d-4faf-b557-3a0797869c55 | Token for administrator operations, only for org.apache.hugegraph.auth.ConfigAuthenticator. | +| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or a custom implementation. | | auth.graph_store | hugegraph | The name of graph used to store authentication information, like users, only for org.apache.hugegraph.auth.StandardAuthenticator. | -| auth.user_tokens | [hugegraph:9fd95c9c-711b-415b-b85f-d4df46ba5c31] | The map of user tokens with name and password, only for org.apache.hugegraph.auth.ConfigAuthenticator. | | auth.audit_log_rate | 1000.0 | The max rate of audit log output per user, default value is 1000 records per second. 
| | auth.cache_capacity | 10240 | The max cache capacity of each auth cache item. | | auth.cache_expire | 600 | The expiration time in seconds of vertex cache. | diff --git a/content/en/docs/config/config-guide.md b/content/en/docs/config/config-guide.md index 979c49332..48e5e08ca 100644 --- a/content/en/docs/config/config-guide.md +++ b/content/en/docs/config/config-guide.md @@ -171,8 +171,7 @@ arthas.ip=127.0.0.1 arthas.disabled_commands=jad # authentication configs -# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or -# 'org.apache.hugegraph.auth.ConfigAuthenticator' +# choose 'org.apache.hugegraph.auth.StandardAuthenticator' or a custom implementation #auth.authenticator= # for StandardAuthenticator mode @@ -180,10 +179,6 @@ arthas.disabled_commands=jad # auth client config #auth.remote_url=127.0.0.1:8899,127.0.0.1:8898,127.0.0.1:8897 -# for ConfigAuthenticator mode -#auth.admin_token= -#auth.user_tokens=[] - # TODO: Deprecated & removed later (useless from version 1.5.0) # rpc server configs for multi graph-servers or raft-servers #rpc.server_host=127.0.0.1 diff --git a/content/en/docs/config/config-option.md b/content/en/docs/config/config-option.md index c018ef293..b0d937b66 100644 --- a/content/en/docs/config/config-option.md +++ b/content/en/docs/config/config-option.md @@ -41,10 +41,8 @@ Corresponding configuration file `rest-server.properties` | batch.max_vertices_per_batch | 500 | The maximum number of vertices submitted per batch. | | batch.max_write_ratio | 50 | The maximum thread ratio for batch writing, only take effect if the batch.max_write_threads is 0. | | batch.max_write_threads | 0 | The maximum threads for batch writing, if the value is 0, the actual value will be set to batch.max_write_ratio * restserver.max_worker_threads. | -| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or org.apache.hugegraph.auth.ConfigAuthenticator. 
| -| auth.admin_token | 162f7848-0b6d-4faf-b557-3a0797869c55 | Token for administrator operations, only for org.apache.hugegraph.auth.ConfigAuthenticator. | +| auth.authenticator | | The class path of authenticator implementation. e.g., org.apache.hugegraph.auth.StandardAuthenticator, or a custom implementation. | | auth.graph_store | hugegraph | The name of graph used to store authentication information, like users, only for org.apache.hugegraph.auth.StandardAuthenticator. | -| auth.user_tokens | [hugegraph:9fd95c9c-711b-415b-b85f-d4df46ba5c31] | The map of user tokens with name and password, only for org.apache.hugegraph.auth.ConfigAuthenticator. | | auth.audit_log_rate | 1000.0 | The max rate of audit log output per user, default value is 1000 records per second. | | auth.cache_capacity | 10240 | The max cache capacity of each auth cache item. | | auth.cache_expire | 600 | The expiration time in seconds of vertex cache. | From e2769b8e5a0b9d680b5de06665e470e187978417 Mon Sep 17 00:00:00 2001 From: chohee Date: Wed, 3 Dec 2025 16:03:12 +0800 Subject: [PATCH 18/19] chore: revise the description --- content/cn/docs/download/download.md | 12 ++---------- content/en/docs/download/download.md | 13 ++----------- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/content/cn/docs/download/download.md b/content/cn/docs/download/download.md index 48977f8a5..d936e2751 100644 --- a/content/cn/docs/download/download.md +++ b/content/cn/docs/download/download.md @@ -9,13 +9,10 @@ weight: 2 > - 推荐使用最新版本的 HugeGraph 软件包, 运行时环境请选择 Java11 > - 验证下载版本, 请使用相应的哈希 (SHA512)、签名和 [项目签名验证 KEYS](https://downloads.apache.org/incubator/hugegraph/KEYS) > - 检查哈希 (SHA512)、签名的说明在 [版本验证](/docs/contribution-guidelines/validate-release/) 页面, 也可参考 [ASF 验证说明](https://www.apache.org/dyn/closer.cgi#verify) - -> 注: HugeGraph 所有组件版本号已保持一致, `client/loader/hubble/common` 等 maven 仓库版本号同理, 依赖引用可参考 [maven 示例](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies) +> - 注: 
HugeGraph 所有组件版本号已保持一致, `client/loader/hubble/common` 等 maven 仓库版本号同理, 依赖引用可参考 [maven 示例](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies) ### 最新版本 1.7.0 -> 注: 从版本 `1.5.0` 开始,需要 Java11 运行时环境 - - Release Date: 2025-11-28 - [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes/) @@ -41,6 +38,7 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). > > 1. 请大家尽早迁移到最新 Release 版本上, 社区将不再维护 `1.0.0` 前的旧版本 (非 ASF 版本) > 2. `1.3.0` 是最后一个兼容 Java8 的主版本, 请尽早使用/迁移运行时为 Java11 (低版本 Java 有潜在更多的 SEC 风险和性能影响) +> 3. 从版本 `1.5.0` 开始,需要 Java11 运行时环境 #### 1.5.0 @@ -114,9 +112,3 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). | Server | Toolchain | Computer | Common | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] | - ---- - -
旧版本 (非 ASF 版本) -由于 ASF 规则要求, 不能直接在当前页面存放非 ASF 发行包, 对于 1.0.0 前旧版本 (非 ASF 版本) 的下载说明, 请跳转至 https://github.com/apache/incubator-hugegraph-doc/wiki/Apache-HugeGraph-(Incubating)-Old-Versions-Download -
diff --git a/content/en/docs/download/download.md b/content/en/docs/download/download.md index d5205c570..f89bfa17e 100644 --- a/content/en/docs/download/download.md +++ b/content/en/docs/download/download.md @@ -10,13 +10,10 @@ weight: 2 > - It is recommended to use the latest version of the HugeGraph software package. Please select Java11 for the runtime environment. > - To verify downloads, use the corresponding hash (SHA512), signature, and [Project Signature Verification KEYS](https://downloads.apache.org/incubator/hugegraph/KEYS). > - Instructions for checking hash (SHA512) and signatures are on the [Validate Release](/docs/contribution-guidelines/validate-release/) page, and you can also refer to [ASF official instructions](https://www.apache.org/dyn/closer.cgi#verify). - -> Note: The version numbers of all components of HugeGraph have been kept consistent, and the version numbers of Maven repositories such as `client/loader/hubble/common` are the same. You can refer to these for dependency references [maven example](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies). +> - Note: The version numbers of all components of HugeGraph have been kept consistent, and the version numbers of Maven repositories such as `client/loader/hubble/common` are the same. You can refer to these for dependency references [maven example](https://github.com/apache/incubator-hugegraph-toolchain#maven-dependencies). ### Latest Version 1.7.0 -> Note: Starting from version `1.5.0`, a Java11 runtime environment is required. - - Release Date: 2025-11-28 - [Release Notes](/docs/changelog/hugegraph-1.7.0-release-notes/) @@ -38,7 +35,7 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). ### Archived Versions -> Note: `1.3.0` is the last major version compatible with Java8, please switch to or migrate to Java11 as soon as possible (lower versions of Java have potentially more SEC risks and performance impacts). 
+> Note: `1.3.0` is the last major version compatible with Java8, please switch to or migrate to Java11 as soon as possible (lower versions of Java have potentially more SEC risks and performance impacts). Starting from version `1.5.0`, a Java11 runtime environment is required. #### 1.5.0 @@ -107,9 +104,3 @@ Please refer to [build from source](/docs/quickstart/hugegraph-server/). | Server | Toolchain | Computer | Common | |-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | 
[[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-toolchain-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-computer-incubating-1.0.0-src.tar.gz.sha512)] | [[Source](https://www.apache.org/dyn/closer.lua/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz?action=download)] [[Sign](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.asc)] [[SHA512](https://downloads.apache.org/incubator/hugegraph/1.0.0/apache-hugegraph-commons-incubating-1.0.0-src.tar.gz.sha512)] | - ---- - -
Old Versions (Non-ASF Versions) -Due to ASF rules, non-ASF distribution packages cannot be hosted directly on this page. For download instructions for old versions before 1.0.0 (non-ASF versions), please jump to https://github.com/apache/incubator-hugegraph-doc/wiki/Apache-HugeGraph-(Incubating)-Old-Versions-Download -
From 2fda709d905a775e9cbbec4d0fa1da80dc0aa4a0 Mon Sep 17 00:00:00 2001 From: chohee Date: Thu, 4 Dec 2025 18:35:42 +0800 Subject: [PATCH 19/19] chore: remove validate-release-new.yml --- .github/workflows/validate-release-new.yml | 805 --------------------- 1 file changed, 805 deletions(-) delete mode 100644 .github/workflows/validate-release-new.yml diff --git a/.github/workflows/validate-release-new.yml b/.github/workflows/validate-release-new.yml deleted file mode 100644 index 08d678d11..000000000 --- a/.github/workflows/validate-release-new.yml +++ /dev/null @@ -1,805 +0,0 @@ -name: "Validate Apache Release (New)" - -on: - workflow_dispatch: - inputs: - release_version: - required: true - description: svn release version - default: '1.7.0' - gpg_user: - required: true - description: current release manager (gpg username) - default: 'pengjunzhi' - java_version: - required: false - description: Java version to validate - default: '11' - type: choice - options: - - '11' - - '17' - - push: - branches: - - 'release-*' - pull_request: - branches: - - 'release-*' - -jobs: - validate: - name: "Validate Release On ${{ matrix.os }} (java-${{ matrix.java_version }})" - runs-on: ${{ matrix.os }} - env: - RELEASE_VERSION: ${{ inputs.release_version || '1.7.0' }} - GPG_USER: ${{ inputs.gpg_user || 'pengjunzhi' }} - JAVA_VERSION: ${{ inputs.java_version || matrix.java_version || '11' }} - SVN_URL_PREFIX: https://dist.apache.org/repos/dist/dev/incubator/hugegraph - KEYS_URL: https://downloads.apache.org/incubator/hugegraph/KEYS - MAX_FILE_SIZE: 800k - SERVER_START_DELAY: 3 - # License Patterns (ASF Category X - Prohibited) - CATEGORY_X: '\bGPL|\bLGPL|Sleepycat License|BSD-4-Clause|\bBCL\b|JSR-275|Amazon Software License|\bRSAL\b|\bQPL\b|\bSSPL|\bCPOL|\bNPL1|Creative Commons Non-Commercial|JSON\.org' - # License Patterns (ASF Category B - Must be documented) - CATEGORY_B: '\bCDDL1|\bCPL|\bEPL|\bIPL|\bMPL|\bSPL|OSL-3.0|UnRAR License|Erlang Public License|\bOFL\b|Ubuntu Font 
License Version 1.0|IPA Font License Agreement v1.0|EPL2.0|CC-BY' - steps: - - name: Checkout source - uses: actions/checkout@v4 - - - name: Install JDK ${{ env.JAVA_VERSION }} - uses: actions/setup-java@v3 - with: - java-version: ${{ env.JAVA_VERSION }} - distribution: 'adopt' - - - name: Install dependencies - run: | - if [[ "${{ runner.os }}" == "macOS" ]]; then - brew install svn wget perl - elif [[ "${{ runner.os }}" == "Linux" ]]; then - sudo apt-get update - sudo apt-get install -y subversion wget perl - fi - # Verify all required commands - for cmd in svn gpg shasum mvn java wget tar curl awk grep find perl; do - if ! command -v "$cmd" &> /dev/null; then - echo "Error: Missing required dependency: $cmd" - exit 1 - fi - echo "✓ $cmd: $(command -v $cmd)" - done - - - name: Cache Maven packages - uses: actions/cache@v3 - with: - path: ~/.m2 - key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }} - restore-keys: ${{ runner.os }}-m2 - - - name: Step 1 - Check Dependencies - run: | - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [1/9]: Check Dependencies" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - # Check Java version - CURRENT_JAVA=$(java -version 2>&1 | head -n 1 | awk -F '"' '{print $2}' | awk -F '.' '{print $1}') - echo "Current Java version: $CURRENT_JAVA (Required: ${{ env.JAVA_VERSION }})" - if [[ "$CURRENT_JAVA" != "${{ env.JAVA_VERSION }}" ]]; then - echo "Error: Java version mismatch! 
Current: Java $CURRENT_JAVA, Required: Java ${{ env.JAVA_VERSION }}" - exit 1 - fi - echo "✓ Java version check passed: Java $CURRENT_JAVA" - - - name: Step 2 - Prepare Release Files - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [2/9]: Prepare Release Files" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - DIST_DIR="dist/${{ env.RELEASE_VERSION }}" - echo "Downloading from SVN to: ${DIST_DIR}" - - rm -rf "${DIST_DIR}" - mkdir -p "${DIST_DIR}" - - if ! svn co "${SVN_URL_PREFIX}/${{ env.RELEASE_VERSION }}" "${DIST_DIR}"; then - echo "Error: Failed to download from SVN: ${SVN_URL_PREFIX}/${{ env.RELEASE_VERSION }}" - exit 1 - fi - - echo "✓ Downloaded release files from SVN" - cd "${DIST_DIR}" - ls -lh - - - name: Step 3 - Import & Trust GPG Keys - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [3/9]: Import & Trust GPG Keys" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - echo "Downloading KEYS file from ${KEYS_URL}..." - if ! wget -q "${KEYS_URL}" -O KEYS; then - echo "Error: Failed to download KEYS file from ${KEYS_URL}" - exit 1 - fi - echo "✓ KEYS file downloaded" - - echo "Importing GPG keys..." - IMPORT_OUTPUT=$(gpg --import KEYS 2>&1) - IMPORTED_COUNT=$(echo "$IMPORT_OUTPUT" | grep -c "imported" || echo "0") - - if [[ "$IMPORTED_COUNT" == "0" ]]; then - echo "⚠ No new keys imported (may already exist in keyring)" - else - echo "✓ Imported GPG keys" - fi - - # Trust specific user key - if ! gpg --list-keys "${{ env.GPG_USER }}" &>/dev/null; then - echo "Error: User '${{ env.GPG_USER }}' key not found in imported keys. Please verify the username." 
- exit 1 - fi - - echo "Trusting GPG key for user: ${{ env.GPG_USER }}" - echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "${{ env.GPG_USER }}" trust 2>/dev/null - echo "✓ Trusted key for ${{ env.GPG_USER }}" - - # Trust all imported keys - echo "Trusting all imported public keys..." - TRUSTED=0 - for key in $(gpg --no-tty --list-keys --with-colons | awk -F: '/^pub/ {print $5}'); do - echo -e "5\ny\n" | gpg --batch --command-fd 0 --edit-key "$key" trust 2>/dev/null - TRUSTED=$((TRUSTED + 1)) - done - echo "✓ Trusted $TRUSTED GPG keys" - - - name: Step 4 - Verify SHA512 & GPG Signatures - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [4/9]: Verify SHA512 & GPG Signatures" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - PACKAGE_COUNT=0 - for pkg in *.tar.gz; do - if [[ -f "$pkg" ]]; then - PACKAGE_COUNT=$((PACKAGE_COUNT + 1)) - fi - done - - CURRENT=0 - for pkg in *.tar.gz; do - if [[ ! 
-f "$pkg" ]]; then - continue - fi - CURRENT=$((CURRENT + 1)) - echo " [${CURRENT}/${PACKAGE_COUNT}] $pkg" - - # Check SHA512 - if shasum -a 512 --check "${pkg}.sha512"; then - echo " ✓ SHA512 verified: $pkg" - else - echo " ✗ SHA512 verification failed: $pkg" - exit 1 - fi - - # Check GPG signature - if gpg --verify "${pkg}.asc" "$pkg" 2>&1 | grep -q "Good signature"; then - echo " ✓ GPG signature verified: $pkg" - else - echo " ✗ GPG signature verification failed: $pkg" - exit 1 - fi - done - - - name: Step 5 - Validate Source Packages - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [5/9]: Validate Source Packages" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - SRC_PACKAGES=() - for pkg in *-src.tar.gz; do - if [[ -f "$pkg" ]]; then - SRC_PACKAGES+=("$pkg") - fi - done - - echo "Found ${#SRC_PACKAGES[@]} source package(s)" - - for src_pkg in "${SRC_PACKAGES[@]}"; do - echo "" - echo "Validating source package: $src_pkg" - - # Extract package - PACKAGE_DIR=$(basename "$src_pkg" .tar.gz) - rm -rf "$PACKAGE_DIR" - tar -xzf "$src_pkg" - - if [[ ! -d "$PACKAGE_DIR" ]]; then - echo "Error: Failed to extract package: $src_pkg" - exit 1 - fi - - pushd "$PACKAGE_DIR" - - # 5.1: Check incubating name - if [[ ! "$src_pkg" =~ "incubating" ]]; then - echo "Error: Package name '$src_pkg' should include 'incubating'" - exit 1 - fi - echo " ✓ Package name includes 'incubating'" - - # 5.2: Check required files - if [[ ! -f "LICENSE" ]]; then - echo "Error: Package '$src_pkg' missing LICENSE file" - exit 1 - fi - echo " ✓ LICENSE file exists" - - if [[ ! -f "NOTICE" ]]; then - echo "Error: Package '$src_pkg' missing NOTICE file" - exit 1 - fi - echo " ✓ NOTICE file exists" - - if [[ ! 
-f "DISCLAIMER" ]]; then - echo "Error: Package '$src_pkg' missing DISCLAIMER file" - exit 1 - fi - echo " ✓ DISCLAIMER file exists" - - # 5.3: Check license categories (Category X - Prohibited) - CAT_X_MATCHES=$(grep -r -E "${{ env.CATEGORY_X }}" LICENSE NOTICE 2>/dev/null || true) - CAT_X_COUNT=$(echo "$CAT_X_MATCHES" | grep -v '^$' | wc -l | tr -d ' ') - - if [[ $CAT_X_COUNT -ne 0 ]]; then - echo "Error: Package '$src_pkg' contains $CAT_X_COUNT prohibited ASF Category X license(s):" - echo "$CAT_X_MATCHES" - exit 1 - fi - echo " ✓ No Category X licenses found" - - # 5.4: Check license categories (Category B - Warning) - CAT_B_COUNT=$(grep -r -E "${{ env.CATEGORY_B }}" LICENSE NOTICE 2>/dev/null | wc -l | tr -d ' ' || echo "0") - if [[ $CAT_B_COUNT -ne 0 ]]; then - echo " ⚠ Warning: Package '$src_pkg' contains $CAT_B_COUNT ASF Category B license(s) - please verify documentation" - else - echo " ✓ No Category B licenses found" - fi - - # 5.5: Check empty files and directories - EMPTY_DIRS=$(find . -type d -empty 2>/dev/null || true) - EMPTY_FILES=$(find . -type f -empty 2>/dev/null || true) - - if [[ -n "$EMPTY_DIRS" ]]; then - echo "Error: Package '$src_pkg' contains empty director(y/ies):" - echo "$EMPTY_DIRS" - exit 1 - fi - - if [[ -n "$EMPTY_FILES" ]]; then - echo "Error: Package '$src_pkg' contains empty file(s):" - echo "$EMPTY_FILES" - exit 1 - fi - echo " ✓ No empty files or directories" - - # 5.6: Check file sizes - LARGE_FILES=$(find . -type f -size "+${{ env.MAX_FILE_SIZE }}" 2>/dev/null || true) - if [[ -n "$LARGE_FILES" ]]; then - echo "Error: Package '$src_pkg' contains file(s) larger than ${{ env.MAX_FILE_SIZE }}:" - echo "$LARGE_FILES" - exit 1 - fi - echo " ✓ All files are within size limit" - - # 5.7: Check binary files - BINARY_COUNT=0 - UNDOCUMENTED_COUNT=0 - while IFS= read -r binary_file; do - BINARY_COUNT=$((BINARY_COUNT + 1)) - FILE_NAME=$(basename "$binary_file") - if ! 
grep -q "$FILE_NAME" LICENSE 2>/dev/null; then - echo "Error: Undocumented binary file: $binary_file" - UNDOCUMENTED_COUNT=$((UNDOCUMENTED_COUNT + 1)) - fi - done < <(find . -type f 2>/dev/null | perl -lne 'print if -B $_' || true) - - if [[ $BINARY_COUNT -eq 0 ]]; then - echo " ✓ No binary files found" - elif [[ $UNDOCUMENTED_COUNT -eq 0 ]]; then - echo " ✓ All $BINARY_COUNT binary file(s) are documented" - else - echo "Error: Found $UNDOCUMENTED_COUNT undocumented binary file(s)" - exit 1 - fi - - # 5.8: Check license headers in source files - echo " Checking for ASF license headers in source files..." - - # Define file patterns to check - FILE_PATTERNS=("*.java" "*.sh" "*.py" "*.go" "*.js" "*.ts" "*.jsx" "*.tsx" "*.c" "*.h" "*.cpp" "*.cc" "*.cxx" "*.hpp" "*.scala" "*.groovy" "*.gradle" "*.rs" "*.kt" "*.proto") - - # Files to exclude - EXCLUDE_PATTERNS=("*.min.js" "*.min.css" "*node_modules*" "*target*" "*build*" "*.pb.go" "*generated*" "*third_party*" "*vendor*") - - FILES_WITHOUT_LICENSE=() - TOTAL_CHECKED=0 - EXCLUDED_COUNT=0 - DOCUMENTED_COUNT=0 - - # Build find command - FIND_CMD="find . -type f \\(" - FIRST=1 - for pattern in "${FILE_PATTERNS[@]}"; do - if [[ $FIRST -eq 1 ]]; then - FIND_CMD="$FIND_CMD -name \"$pattern\"" - FIRST=0 - else - FIND_CMD="$FIND_CMD -o -name \"$pattern\"" - fi - done - FIND_CMD="$FIND_CMD \\) 2>/dev/null" - - # Check each source file - while IFS= read -r source_file; do - # Skip if file matches exclude patterns - SHOULD_EXCLUDE=0 - for exclude_pattern in "${EXCLUDE_PATTERNS[@]}"; do - if [[ "$source_file" == $exclude_pattern ]]; then - SHOULD_EXCLUDE=1 - EXCLUDED_COUNT=$((EXCLUDED_COUNT + 1)) - break - fi - done - - if [[ $SHOULD_EXCLUDE -eq 1 ]]; then - continue - fi - - TOTAL_CHECKED=$((TOTAL_CHECKED + 1)) - - # Check first 30 lines for Apache license header - if ! 
head -n 30 "$source_file" | grep -q "Licensed to the Apache Software Foundation"; then - # Check if documented in LICENSE file - FILE_NAME=$(basename "$source_file") - FILE_PATH_RELATIVE=$(echo "$source_file" | sed 's|^\./||') - - if [[ -f "LICENSE" ]] && (grep -q "$FILE_NAME" LICENSE 2>/dev/null || grep -q "$FILE_PATH_RELATIVE" LICENSE 2>/dev/null); then - DOCUMENTED_COUNT=$((DOCUMENTED_COUNT + 1)) - else - FILES_WITHOUT_LICENSE+=("$source_file") - fi - fi - done < <(eval "$FIND_CMD") - - echo " Checked $TOTAL_CHECKED source file(s) for ASF license headers (excluded $EXCLUDED_COUNT generated/vendored files)" - - if [[ $DOCUMENTED_COUNT -gt 0 ]]; then - echo " Found $DOCUMENTED_COUNT source file(s) documented in LICENSE as third-party code (allowed)" - fi - - if [[ ${#FILES_WITHOUT_LICENSE[@]} -gt 0 ]]; then - echo "Error: Found ${#FILES_WITHOUT_LICENSE[@]} source file(s) without ASF license headers:" - SHOW_COUNT=${#FILES_WITHOUT_LICENSE[@]} - if [[ $SHOW_COUNT -gt 20 ]]; then - SHOW_COUNT=20 - fi - for ((i=0; i" "$pom_file" 2>/dev/null; then - REVISION_VALUE=$(grep "" "$pom_file" | head -1 | sed 's/.*\(.*\)<\/revision>.*/\1/') - ROOT_POM="$pom_file" - break - fi - done < <(find . -name "pom.xml" -type f 2>/dev/null) - - if [[ -n "$ROOT_POM" ]]; then - echo " Found revision property in $ROOT_POM: $REVISION_VALUE" - if [[ "$REVISION_VALUE" != "${{ env.RELEASE_VERSION }}" ]]; then - echo "Error: Version mismatch: $REVISION_VALUE in $ROOT_POM (expected: ${{ env.RELEASE_VERSION }})" - exit 1 - fi - echo " ✓ Version consistency check passed: revision=$REVISION_VALUE" - else - echo " ⚠ Warning: No property found in pom.xml files - skipping version check" - fi - else - echo " Skipping version check for Python project: $src_pkg" - fi - - # 5.10: Check NOTICE year - if [[ -f "NOTICE" ]]; then - CURRENT_YEAR=$(date +%Y) - if ! grep -q "$CURRENT_YEAR" NOTICE; then - echo " ⚠ Warning: NOTICE file may not contain current year ($CURRENT_YEAR). Please verify copyright dates." 
- else - echo " ✓ NOTICE file contains current year" - fi - fi - - # 5.11: Compile source package - echo " Compiling source package: $src_pkg" - - if [[ "$src_pkg" =~ 'hugegraph-ai' ]]; then - echo " ⚠ Skipping compilation for AI module (not required)" - elif [[ "$src_pkg" =~ "hugegraph-computer" ]]; then - if cd computer 2>/dev/null && mvn clean package -DskipTests -Dcheckstyle.skip=true -ntp -e; then - echo " ✓ Compilation successful: $src_pkg" - else - echo "Error: Compilation failed: $src_pkg" - exit 1 - fi - cd .. - else - if mvn clean package -DskipTests -Dcheckstyle.skip=true -ntp -e; then - echo " ✓ Compilation successful: $src_pkg" - else - echo "Error: Compilation failed: $src_pkg" - exit 1 - fi - fi - - popd - echo "✓ Finished validating source package: $src_pkg" - done - - - name: Step 6 - Test Compiled Server Package - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [6/9]: Test Compiled Server Package" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - # Find server directory - SERVER_DIR=$(find . 
-maxdepth 3 -type d -path "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) - - if [[ -z "$SERVER_DIR" ]]; then - echo "Error: Could not find compiled server directory" - exit 1 - fi - - echo "Starting HugeGraph server from: $SERVER_DIR" - pushd "$SERVER_DIR" - - if bin/init-store.sh; then - echo " ✓ Store initialized" - else - echo "Error: Failed to initialize store" - exit 1 - fi - - sleep ${{ env.SERVER_START_DELAY }} - - if bin/start-hugegraph.sh; then - echo " ✓ Server started" - else - echo "Error: Failed to start server" - exit 1 - fi - - popd - - - name: Step 7 - Test Compiled Toolchain Packages - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [7/9]: Test Compiled Toolchain Packages" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - TOOLCHAIN_SRC=$(find . -maxdepth 3 -type d -path "*toolchain*src" 2>/dev/null | head -n1) - - if [[ -n "$TOOLCHAIN_SRC" ]]; then - pushd "$TOOLCHAIN_SRC" - - TOOLCHAIN_DIR=$(find . -maxdepth 1 -type d -name "*toolchain*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$TOOLCHAIN_DIR" ]]; then - pushd "$TOOLCHAIN_DIR" - - # Test Loader - echo "Testing HugeGraph Loader..." - LOADER_DIR=$(find . -maxdepth 1 -type d -name "*loader*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$LOADER_DIR" ]]; then - pushd "$LOADER_DIR" - if bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph; then - echo " ✓ Loader test passed" - else - echo "Error: Loader test failed" - exit 1 - fi - popd - fi - - # Test Tool - echo "Testing HugeGraph Tool..." - TOOL_DIR=$(find . 
-maxdepth 1 -type d -name "*tool*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$TOOL_DIR" ]]; then - pushd "$TOOL_DIR" - if bin/hugegraph gremlin-execute --script 'g.V().count()' && \ - bin/hugegraph task-list && \ - bin/hugegraph backup -t all --directory ./backup-test; then - echo " ✓ Tool test passed" - else - echo "Error: Tool test failed" - exit 1 - fi - popd - fi - - # Test Hubble - echo "Testing HugeGraph Hubble..." - HUBBLE_DIR=$(find . -maxdepth 1 -type d -name "*hubble*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$HUBBLE_DIR" ]]; then - pushd "$HUBBLE_DIR" - if bin/start-hubble.sh; then - echo " ✓ Hubble started" - sleep 2 - bin/stop-hubble.sh - echo " ✓ Hubble stopped" - else - echo "Error: Hubble test failed" - exit 1 - fi - popd - fi - - popd - fi - - popd - fi - - # Stop server after toolchain tests - SERVER_DIR=$(find . -maxdepth 3 -type d -path "*hugegraph-incubating*src/hugegraph-server/*hugegraph*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) - if [[ -n "$SERVER_DIR" ]]; then - echo "Stopping server..." - pushd "$SERVER_DIR" - bin/stop-hugegraph.sh - echo " ✓ Server stopped" - popd - fi - - - name: Step 8 - Validate Binary Packages - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [8/9]: Validate Binary Packages" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - BIN_PACKAGES=() - for pkg in *.tar.gz; do - if [[ "$pkg" != *-src.tar.gz ]] && [[ -f "$pkg" ]]; then - BIN_PACKAGES+=("$pkg") - fi - done - - echo "Found ${#BIN_PACKAGES[@]} binary package(s)" - - for bin_pkg in "${BIN_PACKAGES[@]}"; do - echo "" - echo "Validating binary package: $bin_pkg" - - # Extract package - PACKAGE_DIR=$(basename "$bin_pkg" .tar.gz) - rm -rf "$PACKAGE_DIR" - tar -xzf "$bin_pkg" - - if [[ ! 
-d "$PACKAGE_DIR" ]]; then - echo "Error: Failed to extract package: $bin_pkg" - exit 1 - fi - - pushd "$PACKAGE_DIR" - - # 8.1: Check incubating name - if [[ ! "$bin_pkg" =~ "incubating" ]]; then - echo "Error: Package name '$bin_pkg' should include 'incubating'" - exit 1 - fi - echo " ✓ Package name includes 'incubating'" - - # 8.2: Check required files - if [[ ! -f "LICENSE" ]]; then - echo "Error: Package '$bin_pkg' missing LICENSE file" - exit 1 - fi - echo " ✓ LICENSE file exists" - - if [[ ! -f "NOTICE" ]]; then - echo "Error: Package '$bin_pkg' missing NOTICE file" - exit 1 - fi - echo " ✓ NOTICE file exists" - - if [[ ! -f "DISCLAIMER" ]]; then - echo "Error: Package '$bin_pkg' missing DISCLAIMER file" - exit 1 - fi - echo " ✓ DISCLAIMER file exists" - - # 8.3: Check licenses directory - if [[ ! -d "licenses" ]]; then - echo "Error: Package '$bin_pkg' missing licenses directory" - exit 1 - fi - echo " ✓ licenses directory exists" - - # 8.4: Check license categories (Category X - Prohibited) - CAT_X_MATCHES=$(grep -r -E "${{ env.CATEGORY_X }}" LICENSE NOTICE licenses 2>/dev/null || true) - CAT_X_COUNT=$(echo "$CAT_X_MATCHES" | grep -v '^$' | wc -l | tr -d ' ') - - if [[ $CAT_X_COUNT -ne 0 ]]; then - echo "Error: Package '$bin_pkg' contains $CAT_X_COUNT prohibited ASF Category X license(s):" - echo "$CAT_X_MATCHES" - exit 1 - fi - echo " ✓ No Category X licenses found" - - # 8.5: Check license categories (Category B - Warning) - CAT_B_COUNT=$(grep -r -E "${{ env.CATEGORY_B }}" LICENSE NOTICE licenses 2>/dev/null | wc -l | tr -d ' ' || echo "0") - if [[ $CAT_B_COUNT -ne 0 ]]; then - echo " ⚠ Warning: Package '$bin_pkg' contains $CAT_B_COUNT ASF Category B license(s) - please verify documentation" - else - echo " ✓ No Category B licenses found" - fi - - # 8.6: Check empty files and directories - EMPTY_DIRS=$(find . -type d -empty 2>/dev/null || true) - EMPTY_FILES=$(find . 
-type f -empty 2>/dev/null || true) - - if [[ -n "$EMPTY_DIRS" ]]; then - echo "Error: Package '$bin_pkg' contains empty director(y/ies):" - echo "$EMPTY_DIRS" - exit 1 - fi - - if [[ -n "$EMPTY_FILES" ]]; then - echo "Error: Package '$bin_pkg' contains empty file(s):" - echo "$EMPTY_FILES" - exit 1 - fi - echo " ✓ No empty files or directories" - - popd - echo "✓ Finished validating binary package: $bin_pkg" - done - - - name: Step 9 - Test Binary Server & Toolchain - run: | - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "Step [9/9]: Test Binary Server & Toolchain" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - - cd dist/${{ env.RELEASE_VERSION }} - - # Test binary server - BIN_SERVER_DIR=$(find . -maxdepth 3 -type d -path "*hugegraph-incubating*${{ env.RELEASE_VERSION }}/*hugegraph-server-incubating*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) - - if [[ -n "$BIN_SERVER_DIR" ]]; then - echo "Testing binary server package..." - pushd "$BIN_SERVER_DIR" - - if bin/init-store.sh && sleep ${{ env.SERVER_START_DELAY }} && bin/start-hugegraph.sh; then - echo " ✓ Binary server started" - else - echo "Error: Failed to start binary server" - exit 1 - fi - - popd - fi - - # Test binary toolchain - BIN_TOOLCHAIN=$(find . -maxdepth 3 -type d -path "*toolchain*${{ env.RELEASE_VERSION }}" 2>/dev/null | head -n1) - - if [[ -n "$BIN_TOOLCHAIN" ]]; then - pushd "$BIN_TOOLCHAIN" - - # Test binary loader - BIN_LOADER=$(find . -maxdepth 1 -type d -name "*loader*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$BIN_LOADER" ]]; then - pushd "$BIN_LOADER" - if bin/hugegraph-loader.sh -f ./example/file/struct.json -s ./example/file/schema.groovy -g hugegraph; then - echo " ✓ Binary loader test passed" - else - echo "Error: Binary loader test failed" - exit 1 - fi - popd - fi - - # Test binary tool - BIN_TOOL=$(find . 
-maxdepth 1 -type d -name "*tool*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$BIN_TOOL" ]]; then - pushd "$BIN_TOOL" - if bin/hugegraph gremlin-execute --script 'g.V().count()' && \ - bin/hugegraph task-list && \ - bin/hugegraph backup -t all --directory ./backup-test; then - echo " ✓ Binary tool test passed" - else - echo "Error: Binary tool test failed" - exit 1 - fi - popd - fi - - # Test binary hubble - BIN_HUBBLE=$(find . -maxdepth 1 -type d -name "*hubble*${{ env.RELEASE_VERSION }}" | head -n1) - if [[ -n "$BIN_HUBBLE" ]]; then - pushd "$BIN_HUBBLE" - if bin/start-hubble.sh; then - echo " ✓ Binary hubble started" - sleep 2 - bin/stop-hubble.sh - echo " ✓ Binary hubble stopped" - else - echo "Error: Binary hubble test failed" - exit 1 - fi - popd - fi - - popd - fi - - # Stop binary server - if [[ -n "$BIN_SERVER_DIR" ]]; then - pushd "$BIN_SERVER_DIR" - bin/stop-hugegraph.sh - echo " ✓ Binary server stopped" - popd - fi - - echo "" - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo " VALIDATION SUMMARY " - echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" - echo "" - echo "✓ VALIDATION PASSED" - echo "" - echo "Please review the validation results and provide feedback in the" - echo "release voting thread on the mailing list." - - strategy: - fail-fast: false - matrix: - java_version: ['11'] - os: [ubuntu-latest, macos-latest] -