From ca69715853f9d0a7821a269f957e703f116edf48 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Thu, 23 Jan 2025 10:00:57 +0100 Subject: [PATCH 1/8] Split Cookbook document into three, for scenarios --- docs/docs/Cookbook.mdx | 363 ----------------------------- docs/docs/cookbook/1-edge.mdx | 292 +++++++++++++++++++++++ docs/docs/cookbook/2-incluster.mdx | 97 ++++++++ docs/docs/cookbook/3-tunneled.mdx | 73 ++++++ docs/docs/cookbook/_category_.json | 8 + 5 files changed, 470 insertions(+), 363 deletions(-) delete mode 100644 docs/docs/Cookbook.mdx create mode 100644 docs/docs/cookbook/1-edge.mdx create mode 100644 docs/docs/cookbook/2-incluster.mdx create mode 100644 docs/docs/cookbook/3-tunneled.mdx create mode 100644 docs/docs/cookbook/_category_.json diff --git a/docs/docs/Cookbook.mdx b/docs/docs/Cookbook.mdx deleted file mode 100644 index 1bdece22..00000000 --- a/docs/docs/Cookbook.mdx +++ /dev/null @@ -1,363 +0,0 @@ ---- -sidebar_position: 3 ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import ThemedImage from '@theme/ThemedImage'; -import useBaseUrl from '@docusaurus/useBaseUrl'; - - -# Cookbook - -These are practical recipes for different deployment scenarios. - -Select here the tab with the scenario you want deploy: - - - - - - - - - - - - - -Select here the featured plugin you want to try: - - - - Offload your pods to a remote machine with Docker engine available - - - Offload your pods to an HPC SLURM based batch system - - - Offload your pods to a remote Kubernetes cluster: COMING SOON - For test instructions contact us! - - - -There are more 3rd-party plugins developed that you can get inspired by or even use out of the box. You can find some ref in the [quick start section](guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) - -## Install interLink - -### Deploy Remote components - -In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. - -#### Interlink API server - - - - __For this deployment mode the remote host has to allow the kubernetes cluster to connect to the Oauth2 proxy service port (30443 if you use the automatic script for installation)__ - - You first need to initialize an OIDC client with you Identity Provider (IdP). - - Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the work, we are going to put the configuration for a generic OIDC identity provider in this cookbook. Nevertheless you can find more detailed on dedicated pages with instructions ready for [GitHub](./guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](./guides/oidc-IAM), [INFN IAM](./guides/oidc-IAM). - - First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) of the interLink installer: - - ```bash - export VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink/releases/latest | jq -r .name) - wget -O interlink-installer https://github.com/interTwin-eu/interLink/releases/download/$VERSION/interlink-installer_Linux_x86_64 - chmod +x interlink-installer - ``` - - Create a template configuration with the init option: - - ```bash - mkdir -p interlink - ./interlink-installer --init --config ./interlink/.installer.yaml - ``` - - The configuration file should be filled as followed. 
This is the case where the `my-node` will contact an edge service that will be listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an OIDC provider `https://my_oidc_idp.com`: - - ```bash title="./interlink/.installer.yaml" - interlink_ip: PUBLIC_IP - interlink_port: API_PORT - interlink_version: 0.3.3 - kubelet_node_name: my-node - kubernetes_namespace: interlink - node_limits: - cpu: "1000" - # MEMORY in GB - memory: 25600 - pods: "100" - oauth: - provider: oidc - issuer: https://my_oidc_idp.com/ - scopes: - - "openid" - - "email" - - "offline_access" - - "profile" - audience: interlink - grant_type: authorization_code - group_claim: groups - group: "my_vk_allowed_group" - token_url: "https://my_oidc_idp.com/token" - device_code_url: "https://my_oidc_idp/auth/device" - client_id: "oidc-client-xx" - client_secret: "xxxxxx" - insecure_http: true - ``` - - Now you are ready to start the OIDC authentication flow to generate all your manifests and configuration files for the interLink components. To do so, just execute the installer: - - ```bash - ./interlink-installer --config ./interlink/.installer.yaml --output-dir ./interlink/manifests/ - ``` - - Install Oauth2-Proxy and interLink API server services and configurations with: - - ```bash - chmod +x ./interlink/manifests/interlink-remote.sh - ./interlink/manifests/interlink-remote.sh install - ``` - - Then start the services with: - - ```bash - ./interlink/manifests/interlink-remote.sh start - ``` - - With `stop` command you can stop the service. By default logs are store in `~/.interlink/logs`, checkout there for any error before moving to the next step. - - __N.B.__ you can look the oauth2_proxy configuration parameters looking into the `interlink-remote.sh` script. - - __N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider to implement your favorite rotation routine for all the logs in `~/.interlink/logs/` - - - Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). The selected scenario does not expect you to do anything here. - - - COMING SOON... - - - - -#### Plugin service - - - - - - - Create utility folders: - - ```bash - mkdir -p $HOME/.interlink/logs - mkdir -p $HOME/.interlink/bin - mkdir -p $HOME/.interlink/config - ``` - - Create a configuration file: - - ```bash title="$HOME/.interlink/config/plugin-config.yaml" - ## Multi user host - Socket: "unix:///home/myusername/.plugin.sock" - InterlinkPort: "0" - SidecarPort: "0" - - CommandPrefix: "" - DataRootFolder: "/home/myusername/.interlink/jobs/" - BashPath: /bin/bash - VerboseLogging: false - ErrorsOnlyLogging: false - ``` - - __N.B.__ Depending on wheter you edge is single user or not, you should know by previous steps which section to uncomment here. - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) - - - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) binary in `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) - - Start the plugins passing the configuration that you have just created: - - ```bash - export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - ``` - - - Check the logs in `$HOME/.interlink/logs/plugin.log`. 
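  - If you want a quick liveness check at this point, you can verify that the process recorded in the pid file is still alive and that the Unix socket was created. This is only a sketch: the pid file and the socket path are the ones assumed in the steps above.

    ```bash
    # exits 0 and prints the message only if the plugin process is still running
    kill -0 $(cat $HOME/.interlink/plugin.pid) && echo "plugin is running"
    # socket path taken from plugin-config.yaml
    ls -l /home/myusername/.plugin.sock
    ```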
- - To kill and restart the process is enough: - - ```bash - # kill - kill $(cat $HOME/.interlink/plugin.pid) - - # restart - export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - - Almost there! Now it's time to add this virtual node into the Kubernetes cluster! - - - - Create utility folders - - ```bash - mkdir -p $HOME/.interlink/logs - mkdir -p $HOME/.interlink/bin - mkdir -p $HOME/.interlink/config - ``` - - - Create a configuration file (__remember to substitute `/home/username/` with your actual home path__): - - ```bash title="./interlink/manifests/plugin-config.yaml" - Socket: "unix:///home/myusername/.plugin.sock" - InterlinkPort: "0" - SidecarPort: "0" - - CommandPrefix: "" - DataRootFolder: "/home/myusername/.interlink/jobs/" - BashPath: /bin/bash - VerboseLogging: false - ErrorsOnlyLogging: false - SbatchPath: "/usr/bin/sbatch" - ScancelPath: "/usr/bin/scancel" - SqueuePath: "/usr/bin/squeue" - SingularityPrefix: "" - ``` - - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) - - - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) binary in `$HOME/.interlink/bin/plugin` - - ```bash - export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink-slurm-plugin/releases/latest | jq -r .name) - wget -O $HOME/.interlink/bin/plugin https://github.com/interTwin-eu/interlink-slurm-plugin/releases/download/${PLUGIN_VERSION}/interlink-sidecar-slurm_Linux_x86_64 - ``` - - - Start the plugins passing the configuration that you have just created: - - ```bash - export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - ``` - - - Check the logs in `$HOME/.interlink/logs/plugin.log`. - - To kill and restart the process is enough: - - ```bash - # kill - kill $(cat $HOME/.interlink/plugin.pid) - - # restart - export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml - $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & - echo $! > $HOME/.interlink/plugin.pid - - Almost there! Now it's time to add this virtual node into the Kubernetes cluster! - - - __KUBERNTES PLUGIN COMING SOOON... CONTACT US FOR TEST INSTRUCTIONS__ - - - - - - Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). The selected scenario does not expect you to do anything here. - - - - COMING SOON... - - - - - - -#### Test interLink stack health - -interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. - -``` -curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink -``` - -This call will return the status of the system and its readiness to submit jobs. - - -### Deploy Kubernetes components - -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. 
- - - - You can now install the helm chart with the preconfigured (by the installer script) helm values in `./interlink/manifests/values.yaml` - - ```bash - helm upgrade --install \ - --create-namespace \ - -n interlink \ - my-node \ - oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink \ - --values ./interlink/manifests/values.yaml - ``` - - You can fix the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) by using the `--version` option. - - - - - Create an helm values file: - - ```yaml title="values.yaml" - nodeName: interlink-with-socket - - plugin: - enabled: true - image: "plugin docker image here" - command: ["/bin/bash", "-c"] - args: ["/app/plugin"] - config: | - your plugin - configuration - goes here!!! - socket: unix:///var/run/plugin.sock - - interlink: - enabled: true - socket: unix:///var/run/interlink.sock - ``` - - Eventually deploy the latest release of the official [helm chart](https://github.com/interTwin-eu/interlink-helm-chart): - - ```bash - helm upgrade --install --create-namespace -n interlink my-virtual-node oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink --values ./values.yaml - ``` - - You can fix the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) by using the `--version` option. - - - COMING SOON... - - - -Whenever you see the node ready, you are good to go! - -To start debugging in case of problems we suggest starting from the pod containers logs! - -## Test the setup - -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). - - diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx new file mode 100644 index 00000000..9f80a977 --- /dev/null +++ b/docs/docs/cookbook/1-edge.mdx @@ -0,0 +1,292 @@ +--- +sidebar_position: 3 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import ThemedImage from '@theme/ThemedImage'; +import useBaseUrl from '@docusaurus/useBaseUrl'; + + +# Edge node deployment + + + + + +## Install interLink + +### Deploy Remote components + +In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +status until all the stack is successfully deployed. + +#### Interlink API server + +__For this deployment mode the remote host has to allow the kubernetes cluster to connect to the Oauth2 proxy service port +(30443 if you use the automatic script for installation)__ + +You first need to initialize an OIDC client with you Identity Provider (IdP). + +Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the work, we are going +to put the configuration for a generic OIDC identity provider in this cookbook. Nevertheless you can find more detailed on dedicated +pages with instructions ready for [GitHub](./guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](./guides/oidc-IAM), +[INFN IAM](./guides/oidc-IAM). 
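Before filling in the installer template below, it can help to double-check which endpoints your IdP actually exposes. The snippet is only an illustrative check: it assumes the provider supports standard OIDC discovery and that `curl` and `jq` are installed (GitHub OAuth apps do not publish a discovery document, so follow the dedicated guide in that case), and it reuses the placeholder issuer URL from the rest of this page:

```bash
# Replace the issuer with your own IdP; the discovery document lists the
# token and device-code endpoints the installer configuration asks for.
curl -s https://my_oidc_idp.com/.well-known/openid-configuration \
  | jq '{token_endpoint, device_authorization_endpoint}'
```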
+ +First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) of the interLink installer: + +```bash +export VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink/releases/latest | jq -r .name) +wget -O interlink-installer https://github.com/interTwin-eu/interLink/releases/download/$VERSION/interlink-installer_Linux_x86_64 +chmod +x interlink-installer +``` + +Create a template configuration with the init option: + +```bash +mkdir -p interlink +./interlink-installer --init --config ./interlink/.installer.yaml +``` + +The configuration file should be filled as followed. This is the case where the `my-node` will contact an edge service that will be +listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an OIDC provider `https://my_oidc_idp.com`: + +```bash title="./interlink/.installer.yaml" +interlink_ip: PUBLIC_IP +interlink_port: API_PORT +interlink_version: 0.3.3 +kubelet_node_name: my-node +kubernetes_namespace: interlink +node_limits: + cpu: "1000" + # MEMORY in GB + memory: 25600 + pods: "100" +oauth: + provider: oidc + issuer: https://my_oidc_idp.com/ + scopes: + - "openid" + - "email" + - "offline_access" + - "profile" + audience: interlink + grant_type: authorization_code + group_claim: groups + group: "my_vk_allowed_group" + token_url: "https://my_oidc_idp.com/token" + device_code_url: "https://my_oidc_idp/auth/device" + client_id: "oidc-client-xx" + client_secret: "xxxxxx" +insecure_http: true +``` + +Now you are ready to start the OIDC authentication flow to generate all your manifests and configuration files for the interLink components. To do so, just execute the installer: + +```bash +./interlink-installer --config ./interlink/.installer.yaml --output-dir ./interlink/manifests/ +``` + +Install Oauth2-Proxy and interLink API server services and configurations with: + +```bash +chmod +x ./interlink/manifests/interlink-remote.sh +./interlink/manifests/interlink-remote.sh install +``` + +Then start the services with: + +```bash +./interlink/manifests/interlink-remote.sh start +``` + +With `stop` command you can stop the service. By default logs are store in `~/.interlink/logs`, checkout there for any error before moving to the next step. + +__N.B.__ you can look the oauth2_proxy configuration parameters looking into the `interlink-remote.sh` script. + +__N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider to implement your favorite rotation routine for all the logs in `~/.interlink/logs/` + + +#### Plugin service + +Select here the featured plugin you want to try: + + + + _Offload your pods to a remote machine with Docker engine available._ + + - Create utility folders: + + ```bash + mkdir -p $HOME/.interlink/logs + mkdir -p $HOME/.interlink/bin + mkdir -p $HOME/.interlink/config + ``` + - Create a configuration file: + + ```bash title="$HOME/.interlink/config/plugin-config.yaml" + ## Multi user host + Socket: "unix:///home/myusername/.plugin.sock" + InterlinkPort: "0" + SidecarPort: "0" + + CommandPrefix: "" + DataRootFolder: "/home/myusername/.interlink/jobs/" + BashPath: /bin/bash + VerboseLogging: false + ErrorsOnlyLogging: false + ``` + - __N.B.__ Depending on wheter you edge is single user or not, you should know by previous steps which section to uncomment here. 
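  - Since this plugin hands pods over to the local Docker engine, it is also worth confirming that the engine is reachable for the same user that will run the plugin. A minimal, illustrative check (any equivalent command works):

    ```bash
    # should print the engine version without requiring sudo
    docker info --format '{{.ServerVersion}}'
    ```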
+ - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) + + - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) binary in + `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) + - Start the plugins passing the configuration that you have just created: + + ```bash + export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml + $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & + echo $! > $HOME/.interlink/plugin.pid + ``` + + - Check the logs in `$HOME/.interlink/logs/plugin.log`. + - To kill and restart the process is enough: + + ```bash + # kill + kill $(cat $HOME/.interlink/plugin.pid) + + # restart + export INTERLINKCONFIGPATH=$PWD/plugin-config.yaml + $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & + echo $! > $HOME/.interlink/plugin.pid + + Almost there! Now it's time to add this virtual node into the Kubernetes cluster! + + + + + _Offload your pods to an HPC SLURM based batch system._ + + - Create utility folders + + ```bash + mkdir -p $HOME/.interlink/logs + mkdir -p $HOME/.interlink/bin + mkdir -p $HOME/.interlink/config + ``` + + - Create a configuration file (__remember to substitute `/home/username/` with your actual home path__): + + ```bash title="./interlink/manifests/plugin-config.yaml" + Socket: "unix:///home/myusername/.plugin.sock" + InterlinkPort: "0" + SidecarPort: "0" + + CommandPrefix: "" + DataRootFolder: "/home/myusername/.interlink/jobs/" + BashPath: /bin/bash + VerboseLogging: false + ErrorsOnlyLogging: false + SbatchPath: "/usr/bin/sbatch" + ScancelPath: "/usr/bin/scancel" + SqueuePath: "/usr/bin/squeue" + SingularityPrefix: "" + ``` + + - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) + + - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) binary in + `$HOME/.interlink/bin/plugin` + + ```bash + export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink-slurm-plugin/releases/latest | jq -r .name) + wget -O $HOME/.interlink/bin/plugin https://github.com/interTwin-eu/interlink-slurm-plugin/releases/download/${PLUGIN_VERSION}/interlink-sidecar-slurm_Linux_x86_64 + ``` + + - Start the plugins passing the configuration that you have just created: + + ```bash + export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml + $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & + echo $! > $HOME/.interlink/plugin.pid + ``` + + - Check the logs in `$HOME/.interlink/logs/plugin.log`. + - To kill and restart the process is enough: + + ```bash + # kill + kill $(cat $HOME/.interlink/plugin.pid) + + # restart + export SLURMCONFIGPATH=$PWD/interlink/manifests/plugin-config.yaml + $HOME/.interlink/bin/plugin &> $HOME/.interlink/logs/plugin.log & + echo $! > $HOME/.interlink/plugin.pid + + Almost there! Now it's time to add this virtual node into the Kubernetes cluster! + + + + + _Offload your pods to a remote Kubernetes cluster._ + +:::note + KUBERNETES PLUGIN IS COMING SOON! + For test instructions contact us! +::: + + + + + +##### 3rd-party plugins + +There are more 3rd-party plugins developed that you can get inspired by or even use out of the box. 
+You can find some ref in the [quick start section](guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) + + +#### Test interLink stack health + +interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. + +``` +curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink +``` + +This call will return the status of the system and its readiness to submit jobs. + + +### Deploy Kubernetes components + +The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +Depending on the scenario you selected, there might be additional operations to be done. + +You can now install the helm chart with the preconfigured (by the installer script) helm values in `./interlink/manifests/values.yaml` + +```bash + helm upgrade --install \ + --create-namespace \ + -n interlink \ + my-node \ + oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink \ + --values ./interlink/manifests/values.yaml +``` + +You can fix the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) +by using the `--version` option. + +Whenever you see the node ready, you are good to go! + +To start debugging in case of problems we suggest starting from the pod containers logs! + +## Test the setup + +Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). + + diff --git a/docs/docs/cookbook/2-incluster.mdx b/docs/docs/cookbook/2-incluster.mdx new file mode 100644 index 00000000..d204725e --- /dev/null +++ b/docs/docs/cookbook/2-incluster.mdx @@ -0,0 +1,97 @@ +--- +sidebar_position: 3 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import ThemedImage from '@theme/ThemedImage'; +import useBaseUrl from '@docusaurus/useBaseUrl'; + + +# In-cluster deployment + + + + +## Install interLink + +### Deploy Remote components + +In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. + +#### Interlink API server + +:::note +Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). +The selected scenario does not expect you to do anything here. +::: + + +#### Plugin service + +:::note +Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). +The selected scenario does not expect you to do anything here. +::: + + + +#### Test interLink stack health + +interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. + +``` +curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink +``` + +This call will return the status of the system and its readiness to submit jobs. + + +### Deploy Kubernetes components + +The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. + +- Create an helm values file: + +```yaml title="values.yaml" +nodeName: interlink-with-socket + +plugin: + enabled: true + image: "plugin docker image here" + command: ["/bin/bash", "-c"] + args: ["/app/plugin"] + config: | + your plugin + configuration + goes here!!! 
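    # Illustrative note: everything in this block is written verbatim to the
    # plugin's own configuration file, so it must follow the plugin's schema
    # (for example, for the SLURM plugin shown in the edge recipe, keys such
    # as DataRootFolder, SbatchPath or VerboseLogging).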
+ socket: unix:///var/run/plugin.sock + +interlink: + enabled: true + socket: unix:///var/run/interlink.sock +``` + +Eventually deploy the latest release of the official [helm chart](https://github.com/interTwin-eu/interlink-helm-chart): + +```bash +helm upgrade --install --create-namespace \ + -n interlink my-virtual-node oci://ghcr.io/intertwin-eu/interlink-helm-chart/interlink \ + --values ./values.yaml +``` + +You can fix the [version of the chart](https://github.com/interTwin-eu/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18) +by using the `--version` option. + +Whenever you see the node ready, you are good to go! + +To start debugging in case of problems we suggest starting from the pod containers logs! + +## Test the setup + +Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/cookbook/3-tunneled.mdx b/docs/docs/cookbook/3-tunneled.mdx new file mode 100644 index 00000000..461442f6 --- /dev/null +++ b/docs/docs/cookbook/3-tunneled.mdx @@ -0,0 +1,73 @@ +--- +sidebar_position: 3 +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import ThemedImage from '@theme/ThemedImage'; +import useBaseUrl from '@docusaurus/useBaseUrl'; + + +# Tunneled deployment + +Select here the tab with the scenario you want deploy: + + + + +## Install interLink + +### Deploy Remote components + +In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +status until all the stack is successfully deployed. + +#### Interlink API server + +:::note +COMING SOON... +::: + + +#### Plugin service + +:::note +COMING SOON... +::: + + +#### Test interLink stack health + +interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. + +``` +curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink +``` + +This call will return the status of the system and its readiness to submit jobs. + + +### Deploy Kubernetes components + +The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +Depending on the scenario you selected, there might be additional operations to be done. + +:::note +COMING SOON... +::: + + +Whenever you see the node ready, you are good to go! + +To start debugging in case of problems we suggest starting from the pod containers logs! + +## Test the setup + +Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). + + diff --git a/docs/docs/cookbook/_category_.json b/docs/docs/cookbook/_category_.json new file mode 100644 index 00000000..d8253b65 --- /dev/null +++ b/docs/docs/cookbook/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Cookbook", + "position": 3, + "link": { + "type": "generated-index", + "description": "Practical recipes for different deployment scenarios." 
+ } +} From 51c22e7c2dcd78b70630164b8e82011692b45b00 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 24 Jan 2025 18:45:26 +0100 Subject: [PATCH 2/8] ignore docs/yarn.lock --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 541c0ea0..67111146 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ interlink-install vendor dist/* +docs/yarn.lock report/* __pycache__/* vendor/* From 7a0f3d533a199b9ab463cea9ed899374905f6b9e Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 31 Jan 2025 10:33:23 +0100 Subject: [PATCH 3/8] Fix broken links --- docs/docs/cookbook/1-edge.mdx | 66 +++++++++++++++--------- docs/docs/cookbook/2-incluster.mdx | 18 ++++--- docs/docs/cookbook/3-tunneled.mdx | 16 +++--- docs/docs/guides/01-deploy-interlink.mdx | 2 +- 4 files changed, 64 insertions(+), 38 deletions(-) diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx index 9f80a977..936e2755 100644 --- a/docs/docs/cookbook/1-edge.mdx +++ b/docs/docs/cookbook/1-edge.mdx @@ -9,6 +9,9 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; # Edge node deployment +Deploy interLink API server on an edge node, dispatching jobs on a remote system +according to an interLink plugin. + - ## Install interLink ### Deploy Remote components -In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +In general, starting from the deployment of the remote components is adviced. +Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. #### Interlink API server -__For this deployment mode the remote host has to allow the kubernetes cluster to connect to the Oauth2 proxy service port +__For this deployment mode the remote host has to allow the kubernetes cluster +to connect to the Oauth2 proxy service port (30443 if you use the automatic script for installation)__ You first need to initialize an OIDC client with you Identity Provider (IdP). -Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the work, we are going -to put the configuration for a generic OIDC identity provider in this cookbook. Nevertheless you can find more detailed on dedicated -pages with instructions ready for [GitHub](./guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](./guides/oidc-IAM), -[INFN IAM](./guides/oidc-IAM). +Since any OIDC provider working with [OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) +tool will do the work, we are going +to put the configuration for a generic OIDC identity provider in this cookbook. +Nevertheless you can find more detailed on dedicated +pages with instructions ready for [GitHub](../guides/deploy-interlink#create-an-oauth-github-app), +[EGI checkin](../guides/oidc-IAM), [INFN IAM](../guides/oidc-IAM). -First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) of the interLink installer: +First of all download the [latest release](https://github.com/interTwin-eu/interLink/releases) +of the interLink installer: ```bash export VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink/releases/latest | jq -r .name) @@ -53,8 +60,10 @@ mkdir -p interlink ./interlink-installer --init --config ./interlink/.installer.yaml ``` -The configuration file should be filled as followed. 
This is the case where the `my-node` will contact an edge service that will be -listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an OIDC provider `https://my_oidc_idp.com`: +The configuration file should be filled as followed. This is the case where the +`my-node` will contact an edge service that will be +listening on `PUBLIC_IP` and `API_PORT` authenticating requests from an +OIDC provider `https://my_oidc_idp.com`: ```bash title="./interlink/.installer.yaml" interlink_ip: PUBLIC_IP @@ -105,11 +114,14 @@ Then start the services with: ./interlink/manifests/interlink-remote.sh start ``` -With `stop` command you can stop the service. By default logs are store in `~/.interlink/logs`, checkout there for any error before moving to the next step. +With `stop` command you can stop the service. By default logs are store in +`~/.interlink/logs`, checkout there for any error before moving to the next step. -__N.B.__ you can look the oauth2_proxy configuration parameters looking into the `interlink-remote.sh` script. +__N.B.__ you can look the oauth2_proxy configuration parameters looking into +the `interlink-remote.sh` script. -__N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider to implement your favorite rotation routine for all the logs in `~/.interlink/logs/` +__N.B.__ logs (expecially if in verbose mode) can become pretty huge, consider +to implement your favorite rotation routine for all the logs in `~/.interlink/logs/`. #### Plugin service @@ -141,11 +153,13 @@ Select here the featured plugin you want to try: VerboseLogging: false ErrorsOnlyLogging: false ``` - - __N.B.__ Depending on wheter you edge is single user or not, you should know by previous steps which section to uncomment here. - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) + - __N.B.__ Depending on wheter you edge is single user or not, + you should know by previous steps which section to uncomment here. 
+ - More on configuration options at + [official repo](https://github.com/interTwin-eu/interlink-docker-plugin/blob/main/README.md) - - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) binary in - `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) + - Download the [latest release](https://github.com/interTwin-eu/interlink-docker-plugin/releases) + binary in `$HOME/.interlink/bin/plugin` for either GPU host or CPU host (tags ending with `no-GPU`) - Start the plugins passing the configuration that you have just created: ```bash @@ -199,10 +213,11 @@ Select here the featured plugin you want to try: SingularityPrefix: "" ``` - - More on configuration options at [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) + - More on configuration options at + [official repo](https://github.com/interTwin-eu/interlink-slurm-plugin/blob/main/README.md) - - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) binary in - `$HOME/.interlink/bin/plugin` + - Download the [latest release](https://github.com/interTwin-eu/interlink-slurm-plugin/releases) + binary in `$HOME/.interlink/bin/plugin` ```bash export PLUGIN_VERSION=$(curl -s https://api.github.com/repos/intertwin-eu/interlink-slurm-plugin/releases/latest | jq -r .name) @@ -248,7 +263,8 @@ Select here the featured plugin you want to try: ##### 3rd-party plugins There are more 3rd-party plugins developed that you can get inspired by or even use out of the box. -You can find some ref in the [quick start section](guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) +You can find some ref in the +[quick start section](../guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one) #### Test interLink stack health @@ -264,10 +280,12 @@ This call will return the status of the system and its readiness to submit jobs. ### Deploy Kubernetes components -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +The deployment of the Kubernetes components are managed by the official +[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. -You can now install the helm chart with the preconfigured (by the installer script) helm values in `./interlink/manifests/values.yaml` +You can now install the helm chart with the preconfigured (by the installer script) +helm values in `./interlink/manifests/values.yaml` ```bash helm upgrade --install \ @@ -287,6 +305,6 @@ To start debugging in case of problems we suggest starting from the pod containe ## Test the setup -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). +Please find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/cookbook/2-incluster.mdx b/docs/docs/cookbook/2-incluster.mdx index d204725e..7d4bffb0 100644 --- a/docs/docs/cookbook/2-incluster.mdx +++ b/docs/docs/cookbook/2-incluster.mdx @@ -27,7 +27,7 @@ In general, starting from the deployment of the remote components is adviced. Si #### Interlink API server :::note -Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). +Go directly to ["Test and debugging tips"](#test-and-debug). The selected scenario does not expect you to do anything here. 
::: @@ -35,7 +35,7 @@ The selected scenario does not expect you to do anything here. #### Plugin service :::note -Go directly to ["Test and debugging tips"](Cookbook#test-and-debug). +Go directly to ["Test and debugging tips"](#test-and-debug). The selected scenario does not expect you to do anything here. ::: @@ -43,7 +43,8 @@ The selected scenario does not expect you to do anything here. #### Test interLink stack health -interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. +InterLink comes with a call that can be used to monitor the overall status of +both interlink server and plugins, at once. ``` curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink @@ -54,7 +55,9 @@ This call will return the status of the system and its readiness to submit jobs. ### Deploy Kubernetes components -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). Depending on the scenario you selected, there might be additional operations to be done. +The deployment of the Kubernetes components are managed by the official +[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +Depending on the scenario you selected, there might be additional operations to be done. - Create an helm values file: @@ -77,7 +80,8 @@ interlink: socket: unix:///var/run/interlink.sock ``` -Eventually deploy the latest release of the official [helm chart](https://github.com/interTwin-eu/interlink-helm-chart): +Eventually deploy the latest release of the official +[helm chart](https://github.com/interTwin-eu/interlink-helm-chart): ```bash helm upgrade --install --create-namespace \ @@ -92,6 +96,6 @@ Whenever you see the node ready, you are good to go! To start debugging in case of problems we suggest starting from the pod containers logs! -## Test the setup +## Test and debug -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). +Please find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/cookbook/3-tunneled.mdx b/docs/docs/cookbook/3-tunneled.mdx index 461442f6..541e982f 100644 --- a/docs/docs/cookbook/3-tunneled.mdx +++ b/docs/docs/cookbook/3-tunneled.mdx @@ -24,7 +24,8 @@ Select here the tab with the scenario you want deploy: ### Deploy Remote components -In general, starting from the deployment of the remote components is adviced. Since the kubernetes virtual node won't reach the `Ready` +In general, starting from the deployment of the remote components is adviced. +Since the kubernetes virtual node won't reach the `Ready` status until all the stack is successfully deployed. #### Interlink API server @@ -43,7 +44,8 @@ COMING SOON... #### Test interLink stack health -interLink comes with a call that can be used to monitor the overall status of both interlink server and plugins, at once. +interLink comes with a call that can be used to monitor the overall status of +both interlink server and plugins, at once. ``` curl -v --unix-socket ${HOME}/.interlink.sock http://unix/pinglink @@ -54,8 +56,10 @@ This call will return the status of the system and its readiness to submit jobs. ### Deploy Kubernetes components -The deployment of the Kubernetes components are managed by the official [HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). -Depending on the scenario you selected, there might be additional operations to be done. 
+The deployment of the Kubernetes components are managed by the official +[HELM chart](https://github.com/interTwin-eu/interlink-helm-chart). +Depending on the scenario you selected, there might be additional operations +to be done. :::note COMING SOON... @@ -66,8 +70,8 @@ Whenever you see the node ready, you are good to go! To start debugging in case of problems we suggest starting from the pod containers logs! -## Test the setup +## Test and debug -Please find a demo pod to test your setup [here](./guides/develop-a-plugin#lets-test-is-out). +Please find a demo pod to test your setup [here](../guides/develop-a-plugin#lets-test-is-out). diff --git a/docs/docs/guides/01-deploy-interlink.mdx b/docs/docs/guides/01-deploy-interlink.mdx index 6881de4f..ebfe18b1 100644 --- a/docs/docs/guides/01-deploy-interlink.mdx +++ b/docs/docs/guides/01-deploy-interlink.mdx @@ -10,7 +10,7 @@ Learn how to deploy interLink virtual nodes on your cluster. In this tutorial yo The installation script that we are going to configure will take care of providing you with a complete Kubernetes manifest to instantiate the virtual node interface. Also you will get an installation bash script to be executed on the remote host where you want to delegate your container execution. That script is already configured to **automatically** authenticate the incoming request from the virtual node component, and forward the correct instructions to the openAPI interface of the [interLink plugin](./api-reference) (a.k.a. sidecar) of your choice. Thus you can use this setup also for directly [developing a plugin](./develop-a-plugin), without caring for anything else. -For a complete guide on all the possible scenarios, please refer to the [Cookbook](../cookbook). +For a complete guide on all the possible scenarios, please refer to the [Cookbook](/docs/category/cookbook). ## Requirements From 63b6eeaf39177addf86d9129df4d699b3dda97d3 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 31 Jan 2025 11:55:29 +0100 Subject: [PATCH 4/8] [docs] include introduction text --- docs/docs/intro.mdx | 54 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 50 insertions(+), 4 deletions(-) diff --git a/docs/docs/intro.mdx b/docs/docs/intro.mdx index a5e5cf60..4d2522e8 100644 --- a/docs/docs/intro.mdx +++ b/docs/docs/intro.mdx @@ -22,7 +22,51 @@ interLink is in early development phase, thus subject to breaking changes with n ::: +## Overview + +We are running a Kubernetes cluster that we are going to consider "local". +And we want to offload some of the containers to other (remote) systems -- +another K8S cluster, or an HPC cluster. + +The containers being offloaded are batch (or "job") containers -- with a pre-defined +lifecycle, non-interactive containers (see [Targets](#targets)). +The dispatching to the other (remote) system is done through a combination of +[Virtual Kubelets](https://virtual-kubelet.io/) and interLink' API and +plugins. +Plugins will define the how the containers will run on the remote system +(see [Target providers](#target-providers)). 
+ +InterLink API and the plugin can be arranged in three different ways across the local cluster +and the remote system: + +- both deployed remote (**[Edge-node](#edge-node)**) +- both deployed local (**[In-cluster](#in-cluster)**) +- API local, plugin remote (**[Tunneled](#tunneled)**) + +``` ++---------------------------+ +----------------------------+ +| Virtual Node | | Pod Containers Runtime | +| | | | +| | | | +| | | | +| +-----------------------------------------+ | +| | (API + plugin) interLink | | +| | (API) interLink (plugin) | | +| | interLink (API + plugin) | | +| +-----------------------------------------+ | +| | | | +| | | | +| | | | +| | | | +| | | | +| | | | ++---------------------------+ +----------------------------+ +``` + + + ## Targets +> rename to Applications - __K8s applications with tasks to be executed on HPC systems__: This target focuses on Kubernetes applications that require high-performance computing (HPC) resources for executing tasks. These tasks might involve complex computations, simulations, or data processing that benefit from the specialized hardware and optimized performance of HPC systems. @@ -31,15 +75,17 @@ interLink is in early development phase, thus subject to breaking changes with n - __Lambda-like functions calling on external resources__: This target involves running containers on demand with specific computing needs. Now these resources might also be outside of the Kubernetes cluster thanks to interLink functionality. ## Target providers +> rename to Runtime providers Our solution is designed to target a wide range of providers with container execution capabilities, including but not limited to: - __SLURM or HTCondor batch systems with Apptainer, Enroot, or Singularity__: These batch systems are widely used in high-performance computing environments to manage and schedule jobs. By integrating with container runtimes like Apptainer, Enroot, or Singularity, our solution can efficiently execute containerized tasks on these systems. -- __Remote/on-demand virtual machines with any container runtime__: This includes virtual machines that can be provisioned on-demand and support container runtimes such as Docker, Podman, or others. This flexibility allows for scalable and dynamic resource allocation based on workload requirements. +- __On-demand virtual machines with any container runtime__: This includes virtual machines that can be provisioned on-demand and support container runtimes such as Docker, Podman, or others. This flexibility allows for scalable and dynamic resource allocation based on workload requirements. - __Remote Kubernetes clusters__: Our solution can extend the capabilities of existing Kubernetes clusters, enabling them to offload workloads to another remote cluster. This is particularly useful for distributing workloads across multiple clusters for better resource utilization and fault tolerance. - __Lambda-like services__: These are serverless computing services that execute code in response to events and automatically manage the underlying compute resources. By targeting these services, our solution can leverage the scalability and efficiency of serverless architectures for containerized workloads. All of this, while exposing a bare Kubernetes API kind of orchestration. ## NOT a target +> rename to non an application - __Long-running services__: Our solution is not designed for services that need to run continuously for extended periods. 
It is optimized for tasks that have a defined start and end, rather than persistent services exposing intra-cluster communication endpoints. - __Kubernetes Federation__: We do not aim to support Kubernetes Federation, which involves managing multiple Kubernetes clusters as a single entity. Our focus is on enabling Kubernetes pods to execute on remote resources, not on federating all kind of resources on multiple clusters. @@ -47,7 +93,7 @@ Our solution is designed to target a wide range of providers with container exec ## Deployment scenarios -### Service remote edge node +### Edge-node In this scenario, the Virtual Kubelet communicates with remote services deployed on a dedicate edge node exposing authenticated interLink APIs and its associated plugin. This setup is ideal for scenarios where edge computing resources are utilized for controlled communication b/w the Kubernetes cluster and the remote resources. @@ -59,7 +105,7 @@ In this scenario, the Virtual Kubelet communicates with remote services deployed }} /> -### In-cluster mode +### In-cluster This scenario involves deploying a Virtual Kubelet along with the interLink API server and the plugin to interact with a remote API. This setup allows Kubernetes pods to be executed on remote resources while all other components sits inside the Kubernetes cluster. @@ -71,7 +117,7 @@ This scenario involves deploying a Virtual Kubelet along with the interLink API }} /> -### Tunneled mode +### Tunneled This deployment involves the Virtual Kubelet connecting to a remote interLink API server and its plugin through a secure tunnel. This setup ensures secure communication between the Kubernetes cluster and the remote resources, making it suitable for environments with strict security requirements or to host services on a multi user host like a login node. From 015a28653d900448eba0b596b656ef2cce1dd227 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 31 Jan 2025 15:01:52 +0100 Subject: [PATCH 5/8] Add short descriptions at in cookbook to show in category cards --- docs/docs/cookbook/1-edge.mdx | 3 +-- docs/docs/cookbook/2-incluster.mdx | 2 ++ docs/docs/cookbook/3-tunneled.mdx | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx index 936e2755..8480d1aa 100644 --- a/docs/docs/cookbook/1-edge.mdx +++ b/docs/docs/cookbook/1-edge.mdx @@ -9,8 +9,7 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; # Edge node deployment -Deploy interLink API server on an edge node, dispatching jobs on a remote system -according to an interLink plugin. +Deploy interLink on an edge node, outside the local K8S cluster. Date: Fri, 28 Feb 2025 14:00:31 +0100 Subject: [PATCH 6/8] Update docs intro --- docs/docs/intro.mdx | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/docs/docs/intro.mdx b/docs/docs/intro.mdx index 4d2522e8..2f18c644 100644 --- a/docs/docs/intro.mdx +++ b/docs/docs/intro.mdx @@ -22,27 +22,41 @@ interLink is in early development phase, thus subject to breaking changes with n ::: -## Overview +InterLink is a Kubernetes plugin that enables the offloading of containers to +remote systems. + +It is designed to extend the capabilities of Kubernetes clusters by allowing +pods to run on external resources, such as high-performance computing (HPC) systems, +remote Kubernetes clusters, or serverless computing services. 
+ +InterLink provides a flexible and scalable way to distribute workloads across +different environments, optimizing resource utilization and performance. -We are running a Kubernetes cluster that we are going to consider "local". -And we want to offload some of the containers to other (remote) systems -- -another K8S cluster, or an HPC cluster. -The containers being offloaded are batch (or "job") containers -- with a pre-defined -lifecycle, non-interactive containers (see [Targets](#targets)). -The dispatching to the other (remote) system is done through a combination of -[Virtual Kubelets](https://virtual-kubelet.io/) and interLink' API and -plugins. -Plugins will define the how the containers will run on the remote system +## Overview + +InterLink necessary needs a Kubernetes cluster to run on, this cluster will be +considered the "local" cluster. +The "remote" system can be another Kubernetes cluster, an HPC system, or a +serverless computing service. +The remote system will be running a plugin that will define how the containers +will run on the remote system. + +The containers being offloaded are batch (or "job") containers with a predefined +lifecycle and are non-interactive (see [Targets](#targets)). +Dispatching to the remote system is achieved through a combination of +[Virtual Kubelets](https://virtual-kubelet.io/), interLink's API, and plugins. +These plugins define how the containers will run on the remote system (see [Target providers](#target-providers)). -InterLink API and the plugin can be arranged in three different ways across the local cluster -and the remote system: +The InterLink API and plugins can be deployed in three different configurations +across the local cluster and the remote system: -- both deployed remote (**[Edge-node](#edge-node)**) -- both deployed local (**[In-cluster](#in-cluster)**) +- Both deployed remotely (**[Edge-node](#edge-node)**) +- Both deployed locally (**[In-cluster](#in-cluster)**) - API local, plugin remote (**[Tunneled](#tunneled)**) + ``` +---------------------------+ +----------------------------+ | Virtual Node | | Pod Containers Runtime | From 1547a9bf2c37c8c2fde96de1c06dbcfd4b26b2b9 Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Tue, 13 May 2025 18:08:45 +0200 Subject: [PATCH 7/8] [wip] update edge-node cookbook doc --- .DS_Store | Bin 10244 -> 0 bytes docs/docs/cookbook/1-edge.mdx | 194 +++++++++++++++++++++++++++++++++- 2 files changed, 193 insertions(+), 1 deletion(-) delete mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index dc18cf8dc6f26fdac6d14e982f95328c238357f8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10244 zcmeHMO=uHA6n>ke?Y1gJ)PkV6Ac8+Zo5Y`rRb!3dK`%yB&{DHa+J+{(p-GJpia|VR zRZtMcKlJ3mi=wC~2wqfr5W#~tu?O*{ClL>dzWGV=Cff*J#NrI>zTJ86&G*f>v$L5A z02sYvsW5;A045&H24~}Gn!^0-@D!h{*@)J_e8>)k69aB8?}^kES^=$qRzNGD70?R& zD+=H@n-z1Gu==bO&;{2TGGa(tq6-A zEQUx$D^d-)#4toU)>|?zCT&Gnkq(Ajd>9hhkSi2}qhtFP4+kSAtUhZ6v;tlQ*xEe+ zt6>N-P^9bk;Gv%H)R%MPaXahUSZ8kkcH-lzmKWPpf9kjX{BV75VD9^8m6?&7S2ie_T*Uj41>!iIelP>iK z=WlnNVlv~TM|nafmBx@m`!i0;E%mx%PO89j6T)G(o9+F<&T=^%30dLjc*rVuM>mA5 zb>a2n52t+jX@EK6zIHx^SF$gIsltGb+*qirqB3`MPu{P4 zsJw9XbKixZe7MOxDy!E^)^&!khOzMBolsO z&bf%Fil1Uv0&gaFi5IFNrv6$ti&1#+-z+$}2_6M9j358?VZ5+hjzrgntZ0-E{%QyCOT=@jKGXRrE{Fl|$NrDNkc#^C7NSZI#k_aEHppwmKr1kl z6z~hO7Tf>d(ft4anbalCRV$zs_(KJZKs*ue#iZ&6N@G6TYq#(?j0Y<$t|BZ?!4nH% s^?W-XZ`gMHa#_chW%4>EZADm~j(I#C?O*ycK(GJF9;sgc|BvhcU%l|i$^ZZW diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx 
index 8f8924df..5ee9889b 100644 --- a/docs/docs/cookbook/1-edge.mdx +++ b/docs/docs/cookbook/1-edge.mdx @@ -19,7 +19,199 @@ Deploy interLink on an edge node, outside the local K8S cluster. }} /> -## Install interLink +## Installation overview + +In this scenario, we need to setup 5 components: +- interLink OIDC/OAuth proxy +- interLink Edge components + - API server + - Plugin service +- interLink Kubernetes components + - Virtual node + - Pod job + +To simplify the installation, interLink provides an _installer_ script that +will generate all the necessary configuration files to +- deploy the interLink API server (remote/edge component) +- deploy the interLink virtual node (local/kubernetes component) + +The installer script will need some information to generate the configuration files: +- the public IP of the edge node +- the port where the interLink API server will be listening +- the OIDC/OAuth provider information (client id, client secret, token url, etc.) + + +## 1. Setup OIDC/OAuth server + +Different Identity Providers (IdP) are supported. +The most common ones are documented in the [Guides](../guides) section +([GitHub](../guides/deploy-interlink#create-an-oauth-github-app), +[EGI checkin](../guides/04-oidc-IAM.md#egi-check-in), +[INFN IAM](../guides/oidc-IAM#indigo-iam)). + +From the IdP provider, we'll need the following information: +- **Client ID**: the client id of the OIDC/OAuth application +- **Client Secret**: the client secret of the OIDC/OAuth application +- **Token URL**: the token url of the OIDC/OAuth application +- **Device Code URL**: the device code url of the OIDC/OAuth application +- **Scopes**: the scopes of the OIDC/OAuth application + +Write them down (in particular, the Client Secret), we'll need them in the next +step. + +Example of the information needed for the GitHub OIDC/OAuth application: + +``` +Client ID: "1234567890abcdef1234" +Client Secret: "1234567890abcdef1234" +Token URL: "https://github.com/login/oauth/access_token" +Device Code URL: "https://github.com/login/device/code" +Scopes: "read:user" +``` + +## 2. Run interLink-installer + +The interLink installer is a script that will generate two configuration files: +- `interlink-remote.sh`: the script that will install and start the interLink API server +- `values.yaml`: the configuration file for deploying the interLink virtual node + +As input for the installer, we will fill-in a configuration file with the +information we collected in the previous step. + +:::note +You can run the _installer_ script on any machine, because +the script will not really _install_ any component, it will just +generate the configuration files needed to install the components. We will Then +copy those files to their corresponding locations (edge and k8s hosts). 
+::: + +### Download interLink-installer + +Choose the +[latest release](https://github.com/interlink-hq/interLink/releases) +of the installer according to the OS/architecture of the machine you are +running the installer on: + +Let's create a directory where we are going to download and run the installer: +```bash +export INTERLINK_WD=$HOME/tmp/interlink +mkdir -p $INTERLINK_WD +``` + +```bash +cd $INTERLINK_WD +export OSARCH=$(uname -s)_$(uname -m) +export VERSION=$(curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | jq -r .name) +wget -O interlink-installer https://github.com/interlink-hq/interLink/releases/download/$VERSION/interlink-installer_$OSARCH +chmod +x interlink-installer +``` + +### Create configuration file + +Create a template configuration with the init option: + +```bash +./interlink-installer --init --config installer.yaml +``` + +This will generate a configuration file called `installer.yaml` in the current +directory. The file will look like this (comments added for clarity): + +```yaml title="installer.yaml" +interlink_ip: PUBLIC_IP_HERE # IP of the edge node +interlink_port: API_PORT_HERE # Port of the interLink API server +interlink_version: 0.4.1 # Version of the interLink API server +kubelet_node_name: my-vk-node # Name of the virtual node in the k8s cluster +kubernetes_namespace: interlink # Namespace where the virtual node will be deployed +node_limits: # Limits for the virtual node + cpu: "10" # CPU limit (in cores) + memory: "256" # Memory limit (in MB) + pods: "10" # Number of pods limit +oauth: # OIDC/OAuth provider information + provider: oidc + grant_type: "" + issuer: https://my_oidc_idp.com/ + group_claim: "" + scopes: + - openid + - email + - offline_access + - profile + github_user: "" + token_url: https://my_oidc_idp.com/token + device_code_url: https://my_oidc_idp/auth/device + client_id: OIDC_CLIENT_ID_HERE + client_secret: OIDC_CLIENT_SECRET_HERE +insecure_http: true +``` + +### Fill the configuration file + +Fill and adjust the configuration file with the information we collected in the +previous step, about the OIDC/OAuth provider and the IP/Port of the edge node. +As well, adjust the node limits according to your needs. + +:::note + +The `interlink_version` is the version of the interLink API server that you +want to install. You can find the latest version in the +[releases page](https://github.com/interlink-hq/interLink/releases), or +directly querying the GitHub API: + +```bash +curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | jq -r .name +``` + +::: + +### Generate installer outputs + +Run `interlink-installer` (without `--init` option) to generate the (final) files. + +The installer will interrogate the OIDC/OAuth provider to get the necessary +information (regarding auth/tokens). + +```bash +./interlink-installer --config installer.yaml --output-dir ./installer-output/ +``` + +:::note +This is an interactive process if you are using an "authorization_code" flow. +Follow the instructions on the screen. On Github, this means entering a +code in the browser (at `device_code_url`); you should get a successfully +"Congratulations, you're all set!" message on your browser. 
+::: + +After a couple of seconds, the installer script should answer with a message like: + +``` +=== Deployment file written at: ./installer-output//values.yaml === + + To deploy the virtual kubelet run: + helm --debug upgrade --install --create-namespace -n interlink my-vk-node oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink --values ./installer-output//values.yaml + + +=== Installation script for remote interLink APIs stored at: ./installer-output//interlink-remote.sh === + + Please execute the script on the remote server: + + "./interlink-remote.sh install" followed by "interlink-remote.sh start" +``` + +Take note of those instructions, as they will be useful in the next steps. +Not now, but later. + + +### Move files to the right place + +At this point, we have two files (inside our `installer-output/` folder): +- `interlink-remote.sh`: the script that will install and start the interLink API server +- `values.yaml`: the configuration file for deploying the interLink virtual node + +We need to copy the `interlink-remote.sh` script to the edge node (the one +where we want to run the interLink API server) and the `values.yaml` file to +the Kubernetes cluster (where we want to run the interLink virtual node). + ### Deploy Remote components From 029dcbe0f10d6548da3fb9eb381e4a1cfcb19dcd Mon Sep 17 00:00:00 2001 From: Carlos Brandt Date: Fri, 16 May 2025 14:08:30 +0200 Subject: [PATCH 8/8] [docs] revised edge cookbook --- docs/docs/cookbook/1-edge.mdx | 364 +++++++++++++++++++--------------- 1 file changed, 208 insertions(+), 156 deletions(-) diff --git a/docs/docs/cookbook/1-edge.mdx b/docs/docs/cookbook/1-edge.mdx index 5ee9889b..b6ae31d9 100644 --- a/docs/docs/cookbook/1-edge.mdx +++ b/docs/docs/cookbook/1-edge.mdx @@ -22,7 +22,7 @@ Deploy interLink on an edge node, outside the local K8S cluster. ## Installation overview In this scenario, we need to setup 5 components: -- interLink OIDC/OAuth proxy +- OIDC/OAuth proxy - interLink Edge components - API server - Plugin service @@ -41,9 +41,9 @@ The installer script will need some information to generate the configuration fi - the OIDC/OAuth provider information (client id, client secret, token url, etc.) -## 1. Setup OIDC/OAuth server +## 1. Setup OIDC/OAuth server -Different Identity Providers (IdP) are supported. +Different Identity Providers (IdP) are supported. The most common ones are documented in the [Guides](../guides) section ([GitHub](../guides/deploy-interlink#create-an-oauth-github-app), [EGI checkin](../guides/04-oidc-IAM.md#egi-check-in), @@ -67,16 +67,16 @@ Client Secret: "1234567890abcdef1234" Token URL: "https://github.com/login/oauth/access_token" Device Code URL: "https://github.com/login/device/code" Scopes: "read:user" -``` +``` ## 2. Run interLink-installer The interLink installer is a script that will generate two configuration files: -- `interlink-remote.sh`: the script that will install and start the interLink API server -- `values.yaml`: the configuration file for deploying the interLink virtual node +- `interlink-remote.sh`: the script to install and start the interLink API server; +- `values.yaml`: helm _Values_ file for deploying the interLink virtual node. As input for the installer, we will fill-in a configuration file with the -information we collected in the previous step. +information we collected in the previous step. :::note You can run the _installer_ script on any machine, because @@ -87,13 +87,13 @@ copy those files to their corresponding locations (edge and k8s hosts). 
### Download interLink-installer -Choose the -[latest release](https://github.com/interlink-hq/interLink/releases) +Choose the +[latest release](https://github.com/interlink-hq/interLink/releases) of the installer according to the OS/architecture of the machine you are running the installer on: Let's create a directory where we are going to download and run the installer: -```bash +```bash export INTERLINK_WD=$HOME/tmp/interlink mkdir -p $INTERLINK_WD ``` @@ -155,7 +155,7 @@ As well, adjust the node limits according to your needs. The `interlink_version` is the version of the interLink API server that you want to install. You can find the latest version in the -[releases page](https://github.com/interlink-hq/interLink/releases), or +[releases page](https://github.com/interlink-hq/interLink/releases), or directly querying the GitHub API: ```bash @@ -169,20 +169,16 @@ curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | j Run `interlink-installer` (without `--init` option) to generate the (final) files. The installer will interrogate the OIDC/OAuth provider to get the necessary -information (regarding auth/tokens). +information (regarding auth/tokens). This will be an interactive process, +follow the instructions on the screen. ```bash ./interlink-installer --config installer.yaml --output-dir ./installer-output/ ``` -:::note -This is an interactive process if you are using an "authorization_code" flow. -Follow the instructions on the screen. On Github, this means entering a -code in the browser (at `device_code_url`); you should get a successfully -"Congratulations, you're all set!" message on your browser. -::: - -After a couple of seconds, the installer script should answer with a message like: +If everything goes well, you'll see a successful (client authentication) +message in your browser, and (after a couple of seconds) a message like this +on your terminal: ``` === Deployment file written at: ./installer-output//values.yaml === @@ -198,151 +194,169 @@ After a couple of seconds, the installer script should answer with a message lik "./interlink-remote.sh install" followed by "interlink-remote.sh start" ``` -Take note of those instructions, as they will be useful in the next steps. +> Take note of those instructions, as they will be useful in the next steps. Not now, but later. +#### Checkpoint + +At this point, we have the following files in our working directory: +```bash +% tree +. +├── installer-output +│   ├── interlink-remote.sh +│   └── values.yaml +├── installer.yaml +└── interlink-installer + +2 directories, 4 files +``` ### Move files to the right place -At this point, we have two files (inside our `installer-output/` folder): +We have two files (inside our `installer-output/` folder): - `interlink-remote.sh`: the script that will install and start the interLink API server - `values.yaml`: the configuration file for deploying the interLink virtual node -We need to copy the `interlink-remote.sh` script to the edge node (the one -where we want to run the interLink API server) and the `values.yaml` file to -the Kubernetes cluster (where we want to run the interLink virtual node). +We need to copy/move those files: +- Copy the `interlink-remote.sh` script to the edge node; + * E.g., move it to `~/any/path/interlink-remote.sh` + in the `edge-node` machine. +- Copy the `values.yaml` file to the Kubernetes cluster + * E.g., move it to `~/some/path/values.yaml` in the + `k8s-cluster` machine. -### Deploy Remote components +## 3. 
Setup Edge Components
 
-In general, starting from the deployment of the remote components is adviced.
-Since the kubernetes virtual node won't reach the `Ready` status until all the
+In general, starting from the deployment of the remote components is advised,
+since the kubernetes virtual node won't reach the _ready_ status until all the
 stack is successfully deployed.
 
-#### Interlink API server
-
-**For this deployment mode the remote host has to allow the kubernetes cluster
-to connect to the Oauth2 proxy service port (30443 if you use the automatic
-script for installation)**
+### Interlink API server
 
-You first need to initialize an OIDC client with you Identity Provider (IdP).
+> This is the point where the connection to the OAuth2 proxy service takes place,
+> through the `interlink_port` we set in `installer.yaml` earlier.
+> Make sure this port (`30443` by default) is open on the edge node.
 
-Since any OIDC provider working with
-[OAuth2 Proxy](https://oauth2-proxy.github.io/oauth2-proxy/) tool will do the
-work, we are going to put the configuration for a generic OIDC identity provider
-in this cookbook. Nevertheless you can find more detailed on dedicated pages
-with instructions ready for
-[GitHub](../guides/deploy-interlink#create-an-oauth-github-app),
-[EGI checkin](../guides/04-oidc-IAM.md#egi-check-in),
-[INFN IAM](../guides/oidc-IAM#indigo-iam).
+The `interlink-remote.sh` script generated in the previous step should be
+on your edge node by now.
+That script contains all the information necessary to install and configure the
+interLink API server and the OAuth2 client.
 
-Then download the
-[latest release](https://github.com/interlink-hq/interLink/releases) of the
-interLink installer:
+> Make sure the script is executable: `chmod +x interlink-remote.sh`.
 
-```bash
-mkdir -p $HOME/.interlink
-export VERSION=$(curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | jq -r .name)
-wget -O $HOME/interlink-installer https://github.com/interlink-hq/interLink/releases/download/$VERSION/interlink-installer_Linux_x86_64
-chmod +x $HOME/.interlink/interlink-installer
+```sh
+./interlink-remote.sh install
 ```
 
-Create a template configuration with the init option:
+This will download and place the necessary software under `~/.interlink`:
 
-```bash
-mkdir -p $HOME/.interlink/logs
-mkdir -p $HOME/.interlink/bin
-mkdir -p $HOME/.interlink/config
-$HOME/.interlink/interlink-installer --init --config $HOME/.interlink/installer.yaml
-```
+```text title='$ tree ~/.interlink'
+.interlink/
+├── bin
+│   ├── interlink
+│   └── oauth2-proxy
+├── config
+│   ├── InterLinkConfig.yaml
+│   ├── tls.crt
+│   └── tls.key
+└── logs
+
-The configuration file should be filled as followed.
This is the case where the
-`my-node` will contact an edge service that will be listening on `PUBLIC_IP` and
-`API_PORT` authenticating requests from an OIDC provider
-`https://my_oidc_idp.com`:
-
-```bash title="$HOME/.interlink/installer.yaml"
-interlink_ip: PUBLIC_IP
-interlink_port: API_PORT
-interlink_version: X.X.X
-kubelet_node_name: my-node
-kubernetes_namespace: interlink
-node_limits:
-  cpu: "1000"
-  # MEMORY in GB
-  memory: 25600
-  pods: "100"
-oauth:
-  provider: oidc
-  issuer: https://my_oidc_idp.com/
-  scopes:
-    - "openid"
-    - "email"
-    - "offline_access"
-    - "profile"
-  audience: interlink
-  grant_type: authorization_code
-  group_claim: groups
-  group: "my_vk_allowed_group"
-  token_url: "https://my_oidc_idp.com/token"
-  device_code_url: "https://my_oidc_idp/auth/device"
-  client_id: "oidc-client-xx"
-  client_secret: "xxxxxx"
-insecure_http: true
+
+4 directories, 5 files
 ```
 
-:::note
+You will not touch those files directly; everything is managed by the
+`interlink-remote.sh` script.
 
-Please fill interlink_version with the desired version.
-In alternative get the latest with:
+For instance, you can now _start_ the API and OAuth2 services:
 
-```bash
-curl -s https://api.github.com/repos/interlink-hq/interlink/releases/latest | jq -r .name
+```sh
+./interlink-remote.sh start
 ```
 
-:::
-
-Now you are ready to start the OIDC authentication flow to generate all your
-manifests and configuration files for the interLink components. To do so, just
-execute the installer:
-
-```bash
-$HOME/.interlink/interlink-installer --config $HOME/.interlink/installer.yaml --output-dir $HOME/.interlink/manifests/
+It runs silently. Notice, though, that `~/.interlink` now contains new files:
+
+```sh title='$ tree -a .interlink/'
+.interlink/
+//highlight-next-line
+├── .interlink.sock
+├── bin
+│   ├── interlink
+│   └── oauth2-proxy
+├── config
+│   ├── InterLinkConfig.yaml
+│   ├── tls.crt
+│   └── tls.key
+//highlight-next-line
+├── interlink.pid
+├── logs
+//highlight-start
+│   ├── interlink.log
+│   └── oauth2-proxy.log
+└── oauth2-proxy.pid
+//highlight-end
+
+4 directories, 10 files
 ```
 
-Install Oauth2-Proxy and interLink API server services and configurations with:
+You can check a few things, although not much is happening yet.
+For starters, you can check the running processes; `interlink` and `oauth2-proxy`
+should be there:
 
-```bash
-chmod +x $HOME/.interlink/manifests/interlink-remote.sh
-$HOME/.interlink/manifests/interlink-remote.sh install
+```sh title='$ ps x'
+(...)
+505865 pts/0    Sl     0:00 ~/.interlink/bin/oauth2-proxy --client-id ...
+505866 pts/0    Sl     0:00 ~/.interlink/bin/interlink
+(...)
 ```
 
-Then start the services with:
+You can also check the logs:
 
-```bash
-$HOME/.interlink/manifests/interlink-remote.sh start
+```sh title='$ cat ~/.interlink/logs/interlink.log'
+time="2025-05-15T12:23:00Z" level=info msg="Loading InterLink config from /home/slurm/.interlink/config/InterLinkConfig.yaml"
+time="2025-05-15T12:23:00Z" level=info msg="{unix:///home/slurm/.interlink/.interlink.sock 0 unix:///home/slurm/.interlink/.plugin.sock 0 false false ~/.interlink}"
+time="2025-05-15T12:23:00Z" level=info msg="interLink version: 0.4.1"
+time="2025-05-15T12:23:00Z" level=info msg="&{0xc0001ccd80 /home/slurm/.interlink/.interlink.sock true {{} {{} 0} {{} {0 0}}}}"
 ```
 
-With `stop` command you can stop the service. By default logs are store in
-`~/.interlink/logs`, checkout there for any error before moving to the next
-step.
- -:::note - -**N.B.** you can look the oauth2_proxy configuration parameters looking directly -into the `interlink-remote.sh` script. - -::: - -:::warning +And if you ping the API server: + +```sh title='$ curl -v --unix-socket ${HOME}/.interlink/.interlink.sock http://unix/pinglink' +* Trying /home/slurm/.interlink/.interlink.sock:0... +* Connected to unix (/home/slurm/.interlink/.interlink.sock) port 80 +> GET /pinglink HTTP/1.1 +> Host: unix +> User-Agent: curl/8.5.0 +> Accept: */* +> +< HTTP/1.1 500 Internal Server Error +< Date: Thu, 15 May 2025 13:11:19 GMT +< Content-Length: 3 +< Content-Type: text/plain; charset=utf-8 +< +* Connection #0 to host unix left intact +503 +``` -**N.B.** logs (expecially if in verbose mode) can become pretty huge, consider -to implement your favorite rotation routine for all the logs in -`~/.interlink/logs/`. +You will get an error. And that's fine, because there is no _plugin_ yet installed. +We can go back and check the `interlink.log` to see the internal reactions +of this ping, you should see something like the following lines appended to log: + +```text +time="2025-05-15T12:37:30Z" level=info msg="InterLink: received Ping call" +time="2025-05-15T12:37:30Z" level=info msg="InterLink: forwarding GetStatus call to sidecar" +time="2025-05-15T12:37:30Z" level=error msg="HTTP InterLink session + Request-69f0d582-c310-49b3-bdd1-45208de3406a: error doing DoReq() of + ReqWithErrorWithSessionNumber error Get \"http://unix/status\": dial unix + //highlight-next-line + /home/slurm/.interlink/.plugin.sock: connect: no such file or directory" +2025/05/15 12:37:30 http: superfluous response.WriteHeader call from + github.com/interlink-hq/interlink/pkg/interlink/api.(*InterLinkHandler).Ping (ping.go:58) +``` -::: +Then, let's bring a plugin in. -#### Plugin service +### Interlink Plugin Select here the featured plugin you want to try: @@ -401,7 +415,7 @@ Select here the featured plugin you want to try: - Please be sure that you have a shared filesystem area with the SLURM nodes available from the edge node. In this case our `DataRootFolder` is `$HOME/.interlink/jobs` - Create a configuration file (__remember to substitute `/home/username/` with your actual home path__): - ```bash title="./interlink/manifests/plugin-config.yaml" + ```bash title="~/.interlink/manifests/plugin-config-slurm.yaml" Socket: "unix:///home/myusername/.interlink/.plugin.sock" InterlinkPort: "0" SidecarPort: "0" @@ -436,7 +450,12 @@ Select here the featured plugin you want to try: echo $! > $HOME/.interlink/plugin.pid ``` - - Check the logs in `$HOME/.interlink/logs/plugin.log`. + - Check the logs in `$HOME/.interlink/logs/plugin.log`: + ```text + time="2025-05-16T07:36:48Z" level=info msg="Loading SLURM config from /home/slurm/.interlink/manifests/plugin-config-slurm.yaml" + time="2025-05-16T07:36:48Z" level=info msg="&{0xc0003bbe00 /home/slurm/.interlink/.plugin.sock true {{} {{} 0} {{} {0 0}}}}" + ``` + - To kill and restart the process is enough: ```bash @@ -461,61 +480,94 @@ Select here the featured plugin you want to try: -:::tip - -Yes, if you will, you can also manage all interLink processes through `systemd`. -Reach out to receive guidance on how we do it in production. You can find an -example in the interlink repo `./systemd` folder. - -::: - ##### 3rd-party plugins There are more 3rd-party plugins developed that you can get inspired by or even use out of the box. 
You can find some references in the [quick start section](../guides/deploy-interlink#attach-your-favorite-plugin-or-develop-one)
 
-#### Test interLink stack health
 
-interLink comes with a call that can be used to monitor the overall status of
-both interlink server and plugins, at once.
+### Test interLink stack health
+
+Let's check again, now with the plugin in place, how the interLink API server reacts:
 
 ```
 curl -v --unix-socket ${HOME}/.interlink/.interlink.sock http://unix/pinglink
 ```
 
-This call will return the status of the system and its readiness to submit jobs.
+Now that we have a plugin, this call should return a successful response
+with an empty (`null`) body:
+
+```
+* Trying /home/slurm/.interlink/.interlink.sock:0...
+* Connected to unix (/home/slurm/.interlink/.interlink.sock) port 80
+> GET /pinglink HTTP/1.1
+> Host: unix
+> User-Agent: curl/8.5.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Date: Fri, 16 May 2025 07:38:44 GMT
+< Transfer-Encoding: chunked
+<
+* Connection #0 to host unix left intact
+null
+```
+
 
+:::tip
+
+If you prefer, you can also manage all interLink processes through `systemd`.
+Reach out to receive guidance on how we do it in production. You can find an
+example in the interLink repo's `./systemd` folder.
+
+:::
+
 
-### Deploy Kubernetes components
+## 4. Setup Kubernetes components
+
+### Deploy Virtual-Node
 
 The deployment of the Kubernetes components is managed by the official
 [Helm chart](https://github.com/interlink-hq/interlink-helm-chart).
 Depending on the scenario you selected, there might be additional operations to
 be done.
 
 You can now install the helm chart with the preconfigured (by the installer
-script) helm values in `./interlink/manifests/values.yaml`
-
-```bash
-  export INTERLINK_CHART_VERSION="X.X.X"
-  helm upgrade --install \
-  --create-namespace \
-  -n interlink \
-  my-node \
-  oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink \
-  --version $INTERLINK_CHART_VERSION \
-  --values ./.interlink/manifests/values.yaml
+script) helm values (e.g., at `/some/path/manifests/values.yaml`):
+
+```sh
+helm --debug upgrade --install \
+  --create-namespace -n interlink \
+  my-vk-node \
+  oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink \
+  --values values.yaml
 ```
 
+This will install the latest interLink helm chart and deploy the virtual node.
+
 :::warning
 
-Remember to pick the
-[version of the chart](https://github.com/interlink-hq/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18)
-and put it into the `INTERLINK_CHART_VERSION` env var above.
+If you want to pin a specific [version of the chart](https://github.com/interlink-hq/interlink-helm-chart/blob/main/interlink/Chart.yaml#L18),
+set it in the `INTERLINK_CHART_VERSION` variable as follows:
+
+```sh
+//highlight-next-line
+export INTERLINK_CHART_VERSION="X.X.X"
+helm upgrade --install \
+  --create-namespace -n interlink \
+  my-vk-node \
+  oci://ghcr.io/interlink-hq/interlink-helm-chart/interlink \
+  --values values.yaml \
+  //highlight-next-line
+  --version $INTERLINK_CHART_VERSION
+```
 
 :::
 
 Once you see the node in the `Ready` state, you are good to go!
 
+### Deploy a Job-Pod
+
 :::note
 
 You can find a demo pod to test your setup
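As a minimal sketch of such a test, the snippet below submits a small pod pinned
to the virtual node. It assumes the node is named `my-vk-node` (the
`kubelet_node_name` from `installer.yaml`); the toleration key is an assumption,
so check the taint actually applied to your node with
`kubectl describe node my-vk-node` and adjust it accordingly:

```bash
# Assumption: the virtual node is called "my-vk-node" (kubelet_node_name in installer.yaml).
kubectl get node my-vk-node            # wait until the node reports Ready

cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: interlink-test-pod
spec:
  restartPolicy: Never
  nodeSelector:
    kubernetes.io/hostname: my-vk-node
  tolerations:
    # Assumption: adjust the key to the taint shown by `kubectl describe node my-vk-node`.
    - key: virtual-node.interlink/no-schedule
      operator: Exists
  containers:
    - name: hello
      image: busybox
      command: ["sh", "-c", "echo 'hello from the remote side' && sleep 30"]
EOF

kubectl get pod interlink-test-pod -w  # the pod should reach Running, then Completed
kubectl logs interlink-test-pod
```

If the pod stays `Pending`, check the virtual node's taints and the plugin logs
on the edge node before anything else.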