From 588a65473696fe5d22fe85efc3683965f7e78f5b Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 15:01:14 +0100 Subject: [PATCH 1/7] Add initial integration tests --- .github/workflows/integration-tests.yml | 197 ++++++++ README.md | 202 ++++++-- integration-test/go.mod | 50 ++ integration-test/go.sum | 156 +++++++ integration-test/kind-test-config.yaml | 5 + integration-test/main_test.go | 39 ++ integration-test/server_metadata_test.go | 512 +++++++++++++++++++++ integration-test/utils/test_environment.go | 271 +++++++++++ k8s/kind-config.yaml | 4 +- makefile | 164 +++++-- 10 files changed, 1527 insertions(+), 73 deletions(-) create mode 100644 .github/workflows/integration-tests.yml create mode 100644 integration-test/go.mod create mode 100644 integration-test/go.sum create mode 100644 integration-test/kind-test-config.yaml create mode 100644 integration-test/main_test.go create mode 100644 integration-test/server_metadata_test.go create mode 100644 integration-test/utils/test_environment.go diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml new file mode 100644 index 0000000..5c42726 --- /dev/null +++ b/.github/workflows/integration-tests.yml @@ -0,0 +1,197 @@ +name: Integration Tests + +on: + push: + pull_request: + workflow_dispatch: + +jobs: + integration-tests: + name: Integration Tests - ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest] + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: '1.21' + cache-dependency-path: integration-test/go.sum + + - name: Set up Docker (Linux) + if: runner.os == 'Linux' + uses: docker/setup-buildx-action@v3 + + - name: Set up Docker (Windows) + if: runner.os == 'Windows' + run: | + # Docker Desktop should already be available on windows-latest + docker version + + - name: Install Kind (Linux) + if: runner.os == 'Linux' + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 + chmod +x ./kind + sudo mv ./kind /usr/local/bin/kind + + - name: Install Kind (Windows) + if: runner.os == 'Windows' + run: | + curl.exe -Lo kind-windows-amd64.exe https://kind.sigs.k8s.io/dl/v0.20.0/kind-windows-amd64 + Move-Item .\kind-windows-amd64.exe C:\Windows\System32\kind.exe + shell: powershell + + - name: Install kubectl (Linux) + if: runner.os == 'Linux' + run: | + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x kubectl + sudo mv kubectl /usr/local/bin/ + + - name: Install kubectl (Windows) + if: runner.os == 'Windows' + run: | + curl.exe -LO "https://dl.k8s.io/release/v1.28.0/bin/windows/amd64/kubectl.exe" + Move-Item .\kubectl.exe C:\Windows\System32\kubectl.exe + shell: powershell + + - name: Install Helm (Linux) + if: runner.os == 'Linux' + run: | + curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + + - name: Install Helm (Windows) + if: runner.os == 'Windows' + run: | + choco install kubernetes-helm -y + shell: powershell + + - name: Verify installations + run: | + docker version + kind version + kubectl version --client + helm version + shell: bash + + - name: Build Docker images (Linux) + if: runner.os == 'Linux' + run: | + make containers-build + timeout-minutes: 30 + + - name: Build Docker images (Windows) + if: runner.os == 'Windows' + run: | + # Build containers sequentially on Windows + Get-ChildItem -Path 
containers -Directory | ForEach-Object {
+            $name = $_.Name
+            Write-Host "Building $name..."
+            docker build "containers/$name" -t "${name}:latest"
+            if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE }
+          }
+        shell: powershell
+        timeout-minutes: 30
+
+      - name: Create Kind cluster
+        run: |
+          kind create cluster --name aggregator-test --config integration-test/kind-test-config.yaml --wait 120s
+        timeout-minutes: 10
+
+      - name: Load images into Kind (Linux)
+        if: runner.os == 'Linux'
+        run: |
+          for dir in containers/*; do
+            if [ -d "$dir" ]; then
+              name=$(basename "$dir")
+              echo "Loading $name into kind..."
+              kind load docker-image "$name:latest" --name aggregator-test
+            fi
+          done
+        shell: bash
+        timeout-minutes: 20
+
+      - name: Load images into Kind (Windows)
+        if: runner.os == 'Windows'
+        run: |
+          Get-ChildItem -Path containers -Directory | ForEach-Object {
+            $name = $_.Name
+            Write-Host "Loading $name into kind..."
+            kind load docker-image "${name}:latest" --name aggregator-test
+          }
+        shell: powershell
+        timeout-minutes: 20
+
+      - name: Wait for cluster to be ready
+        run: |
+          kubectl config use-context kind-aggregator-test
+          kubectl wait --for=condition=Ready nodes --all --timeout=120s
+        shell: bash
+
+      - name: Run integration tests
+        run: |
+          cd integration-test
+          go test -v -timeout 30m ./...
+        shell: bash
+        timeout-minutes: 35
+
+      - name: Collect logs on failure (Linux)
+        if: failure() && runner.os == 'Linux'
+        run: |
+          echo "=== Cluster Info ==="
+          kubectl cluster-info dump --output-directory=./cluster-logs --namespaces aggregator-app,aggregator-ops 2>&1 || true
+          echo "=== Docker Containers ==="
+          docker ps -a
+          echo "=== Kind Logs ==="
+          kind export logs ./kind-logs --name aggregator-test || true
+
+      - name: Collect logs on failure (Windows)
+        if: failure() && runner.os == 'Windows'
+        run: |
+          Write-Host "=== Cluster Info ==="
+          kubectl cluster-info dump --output-directory=./cluster-logs --namespaces aggregator-app,aggregator-ops
+          Write-Host "=== Docker Containers ==="
+          docker ps -a
+          Write-Host "=== Kind Logs ==="
+          kind export logs ./kind-logs --name aggregator-test
+        shell: powershell
+        continue-on-error: true
+
+      - name: Upload logs on failure
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: test-logs-${{ matrix.os }}
+          path: |
+            cluster-logs/
+            kind-logs/
+          retention-days: 7
+
+      - name: Cleanup
+        if: always()
+        run: |
+          kind delete cluster --name aggregator-test || true
+        shell: bash
+
+  notify:
+    name: Notify Results
+    needs: integration-tests
+    runs-on: ubuntu-latest
+    if: always()
+    steps:
+      - name: Check test results
+        run: |
+          if [ "${{ needs.integration-tests.result }}" == "success" ]; then
+            echo "✅ All integration tests passed!"
+          else
+            echo "❌ Integration tests failed"
+            exit 1
+          fi
+
diff --git a/README.md b/README.md
index 037b1d4..097fb37 100644
--- a/README.md
+++ b/README.md
@@ -1,61 +1,197 @@
 # Aggregator
 
+[![Integration Tests](https://github.com/SolidLabResearch/aggregator/actions/workflows/integration-tests.yml/badge.svg)](https://github.com/SolidLabResearch/aggregator/actions/workflows/integration-tests.yml)
+
 An aggregator using uma: https://github.com/SolidLabResearch/user-managed-access as the authorization server.
 
 ## Requirements
-This project requires a kubernetes cluster and a running uma server.
-### Kubernetes Cluster -install a kubernetes cluster with minikube: +- Docker +- Kind (Kubernetes in Docker) +- kubectl +- Helm +- Make + +## Quick Start + ```bash -curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 -sudo install minikube-linux-amd64 /usr/local/bin/minikube +# Full setup: Create cluster, build containers, deploy everything +make kind-init +make deploy + +# Access at http://aggregator.local ``` -when minikube is installed, initialize it: +## Setup + +### 1. Install Dependencies + +**Kind:** ```bash -make minikube-init +curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 +chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind ``` -This will start the minikube cluster, build all the containers, and load them into the minikube cluster. -To only build or load the containers without starting the cluster, you can run: + +**kubectl:** ```bash -make containers-build # Build the containers -make containers-load # Load the containers into the minikube cluster -make containers-all # Build and load the containers +curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +chmod +x kubectl +sudo mv kubectl /usr/local/bin/ ``` -It is also possible to specify a certain container to build, load, or both by using the `name` parameter. For example, to build and load only the aggregator container, you can run: + +**Helm:** ```bash -make containers-all name=uma-proxy +curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash ``` -And to start or stop the minikube cluster, you can run: + +### 2. Deploy the Aggregator + ```bash -make minikube-start -make minikube-clean +# Create Kind cluster and load containers +make kind-init + +# Deploy aggregator with Traefik +make deploy ``` -### uma Server -To install the uma server, you first need to clone the uma repository: +The aggregator is now accessible at `http://aggregator.local` + +### 3. Stop/Clean-up the Deployment + ```bash -git clone https://github.com/SolidLabResearch/user-managed-access -cd user-managed-access/packages/uma +make stop # Stop services (cluster stays alive) +make clean # Delete everything including cluster ``` -Make sure you have node.js and npm installed with a version of at least 20.0.0, and run `corepack enable`. -Then install the dependencies: + +## Makefile Commands + +### Cluster Management ```bash -yarn install +make kind-init # Create cluster, build & load containers, start cleaner +make kind-start # Create/start Kind cluster only +make kind-stop # Delete Kind cluster +make kind-dashboard # Deploy Kubernetes dashboard ``` -Finally, the uma server can be started with: + +### Container Management ```bash -yarn start +make containers-build # Build all containers (parallel) +make containers-build CONTAINER=X # Build specific container +make containers-load # Load all images into Kind +make containers-load CONTAINER=X # Load specific image +make containers-all # Build and load all +make containers-all CONTAINER=X # Build and load specific image ``` -### Run the Aggregator -To run the aggregator, you can use the following command: +### Deployment ```bash -make run +make deploy # Deploy Traefik + aggregator +make kind-deploy # Deploy aggregator only +make kind-undeploy # Remove aggregator (keep Traefik & cleaner) +make stop # Stop aggregator + Traefik (keep cluster & cleaner) ``` -### Demo -An easy way to test the aggregator is by running `node client-test create-actor.js` to create an actor. 
-Do make sure the uma server is running before you do this, and that it has the correct policies so you can access the correct endpoints.
-After that, you can run `node client-test get-actor.js` to retrieve the info on the actor you just created and its results.
+### Cleanup
+```bash
+make stop          # Stop services (cluster stays alive)
+make kind-clean    # Remove all deployments (cluster stays alive)
+make clean         # Delete everything including cluster
+make docker-clean  # Clean up Docker images
+```
+
+### Testing
+```bash
+make integration-test  # Run full integration test suite
+```
+
+### Utilities
+```bash
+make hosts-add     # Add aggregator.local to /etc/hosts
+make hosts-remove  # Remove aggregator.local from /etc/hosts
+make enable-wsl    # Configure CoreDNS for WSL2
+```
+
+## Development Workflow
+
+### Making Changes
+
+```bash
+# Rebuild a specific container
+make containers-build CONTAINER=aggregator-server
+make containers-load CONTAINER=aggregator-server
+
+# Restart the deployment
+kubectl rollout restart deployment aggregator-server -n aggregator-app
+
+# Or rebuild everything
+make stop
+make containers-all
+make deploy
+```
+
+### Quick Iteration
+
+```bash
+# After code changes
+make stop            # Stop current deployment
+make containers-all  # Rebuild & reload
+make deploy          # Redeploy
+```
+
+## Architecture
+
+- **Kind Cluster**: Local Kubernetes cluster in Docker
+- **Traefik**: Ingress controller (HTTP port 80)
+- **Aggregator Server**: Registration and metadata service
+- **Aggregator Cleaner**: Auto-cleanup controller for actor namespaces
+- **Dynamic Actors**: Created per user in separate namespaces
+
+## Ports
+
+- **Port 80**: HTTP traffic to aggregator (via Traefik)
+- **Port 443**: HTTPS traffic (available but not configured)
+
+Access: `http://aggregator.local`
+
+## Integration Tests
+
+Automated tests run on GitHub Actions for Linux and Windows on every push and pull request.
+
+### Run Locally
+
+```bash
+make integration-test
+```
+
+Tests use port forwarding to `localhost:8080` to avoid requiring root privileges.
+
+## Troubleshooting
+
+### Cluster Issues
+
+```bash
+# Recreate the cluster from scratch
+make clean
+make kind-init
+make deploy
+```
+
+### Container Build Failures
+
+```bash
+# Build a specific container directly to see the full build output
+docker build containers/aggregator-server -t aggregator-server:latest
+
+# Check logs of a failing container
+docker logs <container-name>
+```
+
+## Contributing
+
+Integration tests run automatically on all pushes and pull requests.
+Ensure tests pass before merging.
+
+## License
+
+See LICENSE file for details.
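
Reviewer note: to make the "Run Locally" section above concrete, the sketch below distills the first check the new test suite performs (see `TestServerDescription` in `integration-test/server_metadata_test.go`). It is illustrative only and not part of the patch; it assumes the harness's port forward is already serving the aggregator on `localhost:8080`, the address the tests use.

```go
// smokecheck: a minimal, standalone version of the server-description check
// from server_metadata_test.go. Run it while the port forward from
// `make integration-test` (or a manual `kubectl port-forward`) is open.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// localhost:8080 is the address SetupPortForward exposes in the test harness.
	resp, err := http.Get("http://localhost:8080/")
	if err != nil {
		log.Fatalf("server description request failed: %v", err)
	}
	defer resp.Body.Close()

	var desc map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&desc); err != nil {
		log.Fatalf("decoding server description failed: %v", err)
	}

	// The same required fields TestServerDescription asserts.
	for _, field := range []string{
		"registration_endpoint",
		"supported_registration_types",
		"version",
		"client_identifier",
		"transformation_catalog",
	} {
		if _, ok := desc[field]; !ok {
			log.Fatalf("missing required field: %s", field)
		}
	}
	fmt.Println("server description looks complete")
}
```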
diff --git a/integration-test/go.mod b/integration-test/go.mod new file mode 100644 index 0000000..470201b --- /dev/null +++ b/integration-test/go.mod @@ -0,0 +1,50 @@ +module aggregator-integration-test + +go 1.21 + +require ( + github.com/stretchr/testify v1.9.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v0.29.0 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.29.0 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/integration-test/go.sum b/integration-test/go.sum new file mode 100644 index 0000000..254b797 --- /dev/null +++ b/integration-test/go.sum @@ -0,0 +1,156 @@ +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= 
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss= +golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/integration-test/kind-test-config.yaml b/integration-test/kind-test-config.yaml new file mode 100644 index 0000000..d64d263 --- /dev/null +++ b/integration-test/kind-test-config.yaml @@ -0,0 +1,5 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + diff --git a/integration-test/main_test.go b/integration-test/main_test.go new file mode 100644 index 0000000..33ef0cc --- /dev/null +++ b/integration-test/main_test.go @@ -0,0 +1,39 @@ +package integration_test + +import ( + "context" + "os" + "testing" + "time" + + "aggregator-integration-test/utils" +) + +var testEnv *utils.TestEnvironment + +func TestMain(m *testing.M) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + defer cancel() + + var err error + testEnv, err = utils.SetupTestEnvironment(ctx) + if err != nil { + panic("Failed to setup test environment: " + err.Error()) + } + + if err := testEnv.WaitForAggregatorReady(ctx); err != nil { + panic("Aggregator not ready: " + err.Error()) + } + + if err := 
testEnv.SetupPortForward(ctx); err != nil { + panic("Failed to setup port forward: " + err.Error()) + } + + code := m.Run() + + if err := testEnv.Cleanup(); err != nil { + panic("Failed to cleanup test environment: " + err.Error()) + } + + os.Exit(code) +} diff --git a/integration-test/server_metadata_test.go b/integration-test/server_metadata_test.go new file mode 100644 index 0000000..b420526 --- /dev/null +++ b/integration-test/server_metadata_test.go @@ -0,0 +1,512 @@ +package integration_test + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" +) + +func TestServerDescription(t *testing.T) { + resp, err := http.Get(testEnv.AggregatorURL + "/") + if err != nil { + t.Fatalf("Failed to get server description: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + t.Logf("Failed to close response body: %v", err) + } + }() + + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected status 200, got %d", resp.StatusCode) + } + + var desc map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&desc); err != nil { + t.Fatalf("Failed to decode server description: %v", err) + } + + requiredFields := []string{ + "registration_endpoint", + "supported_registration_types", + "version", + "client_identifier", + "transformation_catalog", + } + + for _, field := range requiredFields { + if _, ok := desc[field]; !ok { + t.Errorf("Missing required field: %s", field) + } + } + + types, ok := desc["supported_registration_types"].([]interface{}) + if !ok { + t.Errorf("supported_registration_types is not an array") + } else { + found := false + for _, t := range types { + if t == "authorization_code" { + found = true + break + } + } + if !found { + t.Errorf("authorization_code not found in supported_registration_types") + } + } +} + +func TestClientIdentifierDocument(t *testing.T) { + // 1. GET the client_identifier URL from server description + resp, err := http.Get(testEnv.AggregatorURL + "/") + if err != nil { + t.Fatalf("Failed to get server description: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + t.Logf("Failed to close response body: %v", err) + } + }() + + var serverDesc map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&serverDesc); err != nil { + t.Fatalf("Failed to decode server description: %v", err) + } + + clientIdentifierURL, ok := serverDesc["client_identifier"].(string) + if !ok { + t.Fatal("client_identifier field missing or not a string") + } + + // 2. GET the client identifier document + clientResp, err := http.Get(clientIdentifierURL) + if err != nil { + t.Fatalf("Failed to get client identifier document: %v", err) + } + defer func() { + if err := clientResp.Body.Close(); err != nil { + t.Logf("Failed to close client response body: %v", err) + } + }() + + if clientResp.StatusCode != http.StatusOK { + t.Errorf("Expected status 200, got %d", clientResp.StatusCode) + } + + // Verify JSON content type + contentType := clientResp.Header.Get("Content-Type") + if !containsContentType(contentType, "application/ld+json") { + t.Errorf("Expected Content-Type to contain application/ld+json, got %s", contentType) + } + + var clientDoc map[string]interface{} + if err := json.NewDecoder(clientResp.Body).Decode(&clientDoc); err != nil { + t.Fatalf("Failed to decode client identifier document: %v", err) + } + + // 4. 
Check for required fields (OIDC Dynamic Client Registration) + requiredFields := []string{ + "client_id", + } + + for _, field := range requiredFields { + if _, ok := clientDoc[field]; !ok { + t.Errorf("Missing required field: %s", field) + } + } + + // 3. Verify redirect_uris is optional (differs from standard OIDC) + // This is explicitly allowed for aggregator servers since multiple clients can use the same server + if _, hasRedirectURIs := clientDoc["redirect_uris"]; hasRedirectURIs { + t.Logf("redirect_uris present (optional): %v", clientDoc["redirect_uris"]) + } else { + t.Logf("redirect_uris not present (allowed for aggregator)") + } + + // 5. Test content negotiation - try JSON-LD + req, err := http.NewRequest("GET", clientIdentifierURL, nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Accept", "application/ld+json") + + jsonLDResp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get client identifier with JSON-LD: %v", err) + } + defer func() { + if err := jsonLDResp.Body.Close(); err != nil { + t.Logf("Failed to close JSON-LD response body: %v", err) + } + }() + + if jsonLDResp.StatusCode == http.StatusOK { + contentType := jsonLDResp.Header.Get("Content-Type") + if containsContentType(contentType, "application/ld+json") || containsContentType(contentType, "application/json") { + t.Logf("JSON-LD content negotiation supported") + } else { + t.Logf("JSON-LD requested but got Content-Type: %s", contentType) + } + } else if jsonLDResp.StatusCode == http.StatusNotAcceptable { + t.Logf("JSON-LD content negotiation not supported (406 Not Acceptable)") + } + + // 6. Test quality parameters in Accept header + testCases := []struct { + name string + accept string + expectedType string + }{ + { + name: "Prefer JSON with quality", + accept: "application/json;q=1.0, application/ld+json;q=0.8", + expectedType: "application/json", + }, + { + name: "Prefer JSON-LD with quality", + accept: "application/json;q=0.5, application/ld+json;q=1.0", + expectedType: "application/ld+json", + }, + { + name: "Multiple types with quality", + accept: "text/html;q=0.9, application/json;q=0.8, application/ld+json;q=1.0", + expectedType: "application/ld+json", + }, + { + name: "Wildcard with lower quality", + accept: "application/json;q=0.9, */*;q=0.1", + expectedType: "application/json", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req, err := http.NewRequest("GET", clientIdentifierURL, nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Accept", tc.accept) + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get client identifier: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + t.Logf("Failed to close response body: %v", err) + } + }() + + if resp.StatusCode != http.StatusOK { + t.Errorf("Expected status 200, got %d", resp.StatusCode) + } + + contentType := resp.Header.Get("Content-Type") + if !containsContentType(contentType, tc.expectedType) { + t.Errorf("Expected Content-Type to contain %s, got %s", tc.expectedType, contentType) + } + t.Logf("Accept: %s -> Content-Type: %s", tc.accept, contentType) + }) + } +} + +func TestPublicTransformationCatalog(t *testing.T) { + // 1. 
GET the transformation_catalog URL from server description + resp, err := http.Get(testEnv.AggregatorURL + "/") + if err != nil { + t.Fatalf("Failed to get server description: %v", err) + } + defer func() { + if err := resp.Body.Close(); err != nil { + t.Logf("Failed to close response body: %v", err) + } + }() + + var serverDesc map[string]interface{} + if err := json.NewDecoder(resp.Body).Decode(&serverDesc); err != nil { + t.Fatalf("Failed to decode server description: %v", err) + } + + catalogURL, ok := serverDesc["transformation_catalog"].(string) + if !ok { + t.Fatal("transformation_catalog field missing or not a string") + } + + // 2. Verify it returns an RDF document (try Turtle first as it's required for FnO) + req, err := http.NewRequest("GET", catalogURL, nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + req.Header.Set("Accept", "text/turtle, application/ld+json, application/json") + + catalogResp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get transformation catalog: %v", err) + } + defer func() { + if err := catalogResp.Body.Close(); err != nil { + t.Logf("Failed to close catalog response body: %v", err) + } + }() + + if catalogResp.StatusCode != http.StatusOK { + t.Fatalf("Expected status 200, got %d", catalogResp.StatusCode) + } + + contentType := catalogResp.Header.Get("Content-Type") + t.Logf("Transformation catalog Content-Type: %s", contentType) + + // The response should be in an RDF format + isRDF := containsContentType(contentType, "text/turtle") || + containsContentType(contentType, "application/ld+json") || + containsContentType(contentType, "application/json") || + containsContentType(contentType, "application/rdf+xml") + + if !isRDF { + t.Errorf("Expected RDF content type (turtle, JSON-LD, or RDF/XML), got: %s", contentType) + } + + // Try to parse as JSON-LD or JSON to check structure + var catalogData interface{} + jsonReq, _ := http.NewRequest("GET", catalogURL, nil) + jsonReq.Header.Set("Accept", "application/ld+json, application/json") + jsonResp, err := http.DefaultClient.Do(jsonReq) + if err != nil { + t.Logf("Could not fetch JSON representation: %v", err) + } else { + defer func() { + if err := jsonResp.Body.Close(); err != nil { + t.Logf("Failed to close JSON response body: %v", err) + } + }() + if jsonResp.StatusCode == http.StatusOK { + if err := json.NewDecoder(jsonResp.Body).Decode(&catalogData); err == nil { + t.Logf("Successfully parsed transformation catalog as JSON") + + // 3. 
Verify it's an aggr:TransformationCollection (check for type) + checkForType := func(data interface{}, typeName string) bool { + switch v := data.(type) { + case map[string]interface{}: + if typeField, ok := v["@type"]; ok { + switch tf := typeField.(type) { + case string: + return tf == typeName || tf == "aggr:TransformationCollection" || tf == "TransformationCollection" + case []interface{}: + for _, t := range tf { + if ts, ok := t.(string); ok && (ts == typeName || ts == "aggr:TransformationCollection" || ts == "TransformationCollection") { + return true + } + } + } + } + if typeField, ok := v["type"]; ok { + if ts, ok := typeField.(string); ok { + return ts == typeName || ts == "aggr:TransformationCollection" || ts == "TransformationCollection" + } + } + } + return false + } + + hasTransformationCollection := checkForType(catalogData, "TransformationCollection") + if hasTransformationCollection { + t.Logf("Found TransformationCollection type") + } else { + t.Logf("Warning: Could not verify @type is aggr:TransformationCollection (may be in Turtle format)") + } + + // 4. Check that transformations are described using FnO vocabulary + // Look for FnO predicates/properties + checkForFnOTerms := func(data interface{}) bool { + fnoTerms := []string{ + "fno:Function", "Function", + "fno:expects", "expects", + "fno:returns", "returns", + "fno:Parameter", "Parameter", + "fno:Output", "Output", + "fno:executes", "executes", + "hasTransformation", + "aggr:hasTransformation", + } + + str := fmt.Sprintf("%v", data) + for _, term := range fnoTerms { + if containsString(str, term) { + return true + } + } + return false + } + + if checkForFnOTerms(catalogData) { + t.Logf("Found FnO vocabulary terms in catalog") + } else { + t.Logf("Warning: Could not find obvious FnO terms (may need deeper inspection)") + } + + // 5. Try to find specific FnO structures + var checkForFnOStructure func(interface{}) (hasFunctions, hasParameters, hasOutputs bool) + checkForFnOStructure = func(data interface{}) (hasFunctions, hasParameters, hasOutputs bool) { + switch v := data.(type) { + case map[string]interface{}: + for key, value := range v { + if containsString(key, "Function") || containsString(key, "function") { + hasFunctions = true + } + if containsString(key, "Parameter") || containsString(key, "parameter") || containsString(key, "expects") { + hasParameters = true + } + if containsString(key, "Output") || containsString(key, "output") || containsString(key, "returns") { + hasOutputs = true + } + f, p, o := checkForFnOStructure(value) + hasFunctions = hasFunctions || f + hasParameters = hasParameters || p + hasOutputs = hasOutputs || o + } + case []interface{}: + for _, item := range v { + f, p, o := checkForFnOStructure(item) + hasFunctions = hasFunctions || f + hasParameters = hasParameters || p + hasOutputs = hasOutputs || o + } + } + return + } + + hasFunctions, hasParameters, hasOutputs := checkForFnOStructure(catalogData) + if hasFunctions { + t.Logf("Found fno:Function definitions") + } + if hasParameters { + t.Logf("Found fno:Parameter or fno:expects") + } + if hasOutputs { + t.Logf("Found fno:Output or fno:returns") + } + + // 6. 
Check for optional fno:Implementation and rdfs:seeAlso references
+				checkForOptionalTerms := func(data interface{}) (hasImpl, hasSeeAlso bool) {
+					str := fmt.Sprintf("%v", data)
+					hasImpl = containsString(str, "Implementation") || containsString(str, "implementation")
+					hasSeeAlso = containsString(str, "seeAlso") || containsString(str, "rdfs:seeAlso")
+					return
+				}
+
+				hasImpl, hasSeeAlso := checkForOptionalTerms(catalogData)
+				if hasImpl {
+					t.Logf("Found fno:Implementation references")
+				} else {
+					t.Logf("No fno:Implementation found (optional)")
+				}
+				if hasSeeAlso {
+					t.Logf("Found rdfs:seeAlso references")
+				} else {
+					t.Logf("No rdfs:seeAlso found (optional)")
+				}
+			}
+		}
+	}
+
+	// Test content negotiation with different Accept headers
+	acceptHeaders := []string{
+		"text/turtle",
+		"application/ld+json",
+		"application/json",
+	}
+
+	for _, accept := range acceptHeaders {
+		req, _ := http.NewRequest("GET", catalogURL, nil)
+		req.Header.Set("Accept", accept)
+		resp, err := http.DefaultClient.Do(req)
+		if err != nil {
+			t.Logf("Failed to fetch with Accept: %s - %v", accept, err)
+			continue
+		}
+		t.Logf("Accept: %s -> Status: %d, Content-Type: %s", accept, resp.StatusCode, resp.Header.Get("Content-Type"))
+		if err := resp.Body.Close(); err != nil {
+			t.Logf("Failed to close response body: %v", err)
+		}
+	}
+
+	// Test quality parameters with Turtle (only supported format)
+	qualityTestCases := []struct {
+		name         string
+		accept       string
+		expectStatus int
+	}{
+		{
+			name:         "Prefer Turtle with quality",
+			accept:       "text/turtle;q=1.0, application/json;q=0.5",
+			expectStatus: http.StatusOK,
+		},
+		{
+			name:         "Turtle in list with quality",
+			accept:       "application/json;q=0.9, text/turtle;q=0.8, application/ld+json;q=0.7",
+			expectStatus: http.StatusOK,
+		},
+		{
+			name:         "Wildcard with quality",
+			accept:       "*/*;q=0.5",
+			expectStatus: http.StatusOK,
+		},
+		{
+			name:         "Only unsupported types",
+			accept:       "application/xml;q=1.0, text/html;q=0.9",
+			expectStatus: http.StatusUnsupportedMediaType,
+		},
+	}
+
+	for _, tc := range qualityTestCases {
+		t.Run(tc.name, func(t *testing.T) {
+			req, err := http.NewRequest("GET", catalogURL, nil)
+			if err != nil {
+				t.Fatalf("Failed to create request: %v", err)
+			}
+			req.Header.Set("Accept", tc.accept)
+
+			resp, err := http.DefaultClient.Do(req)
+			if err != nil {
+				t.Fatalf("Failed to get transformation catalog: %v", err)
+			}
+			defer func() {
+				if err := resp.Body.Close(); err != nil {
+					t.Logf("Failed to close response body: %v", err)
+				}
+			}()
+
+			if resp.StatusCode != tc.expectStatus {
+				t.Errorf("Expected status %d, got %d for Accept: %s", tc.expectStatus, resp.StatusCode, tc.accept)
+			} else {
+				t.Logf("Accept: %s -> Status: %d, Content-Type: %s", tc.accept, resp.StatusCode, resp.Header.Get("Content-Type"))
+			}
+		})
+	}
+}
+
+// Helper function to check if a content type contains a specific type
+func containsContentType(contentType, expectedType string) bool {
+	return len(contentType) > 0 && (contentType == expectedType ||
+		len(contentType) > len(expectedType) && contentType[:len(expectedType)] == expectedType ||
+		containsString(contentType, expectedType))
+}
+
+// Helper function to check if a string contains a substring (case-sensitive)
+func containsString(s, substr string) bool {
+	return len(s) >= len(substr) && (s == substr ||
+		func() bool {
+			for i := 0; i <= len(s)-len(substr); i++ {
+				if s[i:i+len(substr)] == substr {
+					return true
+				}
+			}
+			return false
+		}())
+}
diff --git 
a/integration-test/utils/test_environment.go b/integration-test/utils/test_environment.go new file mode 100644 index 0000000..61fcf81 --- /dev/null +++ b/integration-test/utils/test_environment.go @@ -0,0 +1,271 @@ +package utils + +import ( + "context" + "fmt" + "os" + "os/exec" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +type TestEnvironment struct { + KubeClient *kubernetes.Clientset + AggregatorURL string + UMAServerURL string + OIDCIssuer string + ResourceServerUrl string + ClusterName string + cleanupFuncs []func() error + umaServerProcess *exec.Cmd + portForwardCmd *exec.Cmd +} + +type ActorConfig struct { + ID string + Name string + Description string +} + +type UserConfig struct { + Username string + Email string + Password string +} + +type Actor struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` +} + +func SetupTestEnvironment(ctx context.Context) (*TestEnvironment, error) { + env := &TestEnvironment{ + ClusterName: "aggregator-test", + AggregatorURL: "http://localhost:8080", + } + + if err := env.setupKubernetesCluster(ctx); err != nil { + return nil, fmt.Errorf("failed to setup kubernetes cluster: %w", err) + } + + if err := env.setupKubeClient(); err != nil { + return nil, fmt.Errorf("failed to setup kubernetes client: %w", err) + } + + if err := env.buildAndLoadContainers(ctx); err != nil { + return nil, fmt.Errorf("failed to build and load containers: %w", err) + } + + if err := env.deployAggregator(ctx); err != nil { + return nil, fmt.Errorf("failed to deploy aggregator: %w", err) + } + + return env, nil +} + +func (env *TestEnvironment) setupKubernetesCluster(ctx context.Context) error { + fmt.Println("Creating Kind cluster...") + + cmd := exec.CommandContext(ctx, "kind", "get", "clusters") + output, err := cmd.CombinedOutput() + if err == nil && strings.Contains(string(output), env.ClusterName) { + fmt.Println("Cluster already exists, deleting...") + deleteCmd := exec.CommandContext(ctx, "kind", "delete", "cluster", "--name", env.ClusterName) + if err := deleteCmd.Run(); err != nil { + return fmt.Errorf("failed to delete existing cluster: %w", err) + } + } + + createCmd := exec.CommandContext(ctx, "kind", "create", "cluster", + "--name", env.ClusterName, + "--config", "kind-test-config.yaml") + createCmd.Stdout = os.Stdout + createCmd.Stderr = os.Stderr + + if err := createCmd.Run(); err != nil { + return fmt.Errorf("failed to create kind cluster: %w", err) + } + + env.cleanupFuncs = append(env.cleanupFuncs, func() error { + cmd := exec.Command("kind", "delete", "cluster", "--name", env.ClusterName) + return cmd.Run() + }) + + return nil +} + +func (env *TestEnvironment) setupKubeClient() error { + kubeconfig := clientcmd.NewDefaultClientConfigLoadingRules().GetDefaultFilename() + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return fmt.Errorf("failed to build kubeconfig: %w", err) + } + + env.KubeClient, err = kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("failed to create kubernetes client: %w", err) + } + + return nil +} + +func (env *TestEnvironment) buildAndLoadContainers(ctx context.Context) error { + fmt.Println("Building containers...") + + buildCmd := exec.CommandContext(ctx, "make", "-C", "..", "containers-build") + buildCmd.Stdout = os.Stdout + buildCmd.Stderr = os.Stderr + + if err := buildCmd.Run(); err != nil { + return fmt.Errorf("failed to build 
containers: %w", err) + } + + fmt.Println("Loading containers into Kind...") + + containers := []string{"aggregator-server", "aggregator", "egress-uma", "ingress-uma", "file-server"} + for _, container := range containers { + loadCmd := exec.CommandContext(ctx, "kind", "load", "docker-image", + container+":latest", "--name", env.ClusterName) + loadCmd.Stdout = os.Stdout + loadCmd.Stderr = os.Stderr + + if err := loadCmd.Run(); err != nil { + return fmt.Errorf("failed to load container %s: %w", container, err) + } + } + + return nil +} + +func (env *TestEnvironment) deployAggregator(ctx context.Context) error { + fmt.Println("Deploying aggregator...") + + yamlFiles := []string{ + "../k8s/app/ns.yaml", + "../k8s/app/config.yaml", + } + + for _, yamlFile := range yamlFiles { + applyCmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", yamlFile) + applyCmd.Stdout = os.Stdout + applyCmd.Stderr = os.Stderr + + if err := applyCmd.Run(); err != nil { + return fmt.Errorf("failed to apply %s: %w", yamlFile, err) + } + } + + applyCmd := exec.CommandContext(ctx, "sh", "-c", + "kubectl apply -f ../k8s/app/aggregator.yaml 2>&1 | grep -v 'IngressRoute' || true") + applyCmd.Stdout = os.Stdout + applyCmd.Stderr = os.Stderr + + if err := applyCmd.Run(); err != nil { + return fmt.Errorf("failed to apply aggregator.yaml: %w", err) + } + + return nil +} + +func (env *TestEnvironment) WaitForAggregatorReady(ctx context.Context) error { + timeout := time.After(2 * time.Minute) + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case <-timeout: + return fmt.Errorf("timeout waiting for aggregator to be ready") + case <-ticker.C: + pods, err := env.KubeClient.CoreV1().Pods("aggregator-app").List(ctx, metav1.ListOptions{ + LabelSelector: "app=aggregator-server", + }) + if err != nil { + continue + } + + if len(pods.Items) > 0 { + pod := pods.Items[0] + if pod.Status.Phase == "Running" { + for _, condition := range pod.Status.Conditions { + if condition.Type == "Ready" && condition.Status == "True" { + return nil + } + } + } + } + } + } +} + +func (env *TestEnvironment) SetupPortForward(ctx context.Context) error { + fmt.Println("Setting up port forward...") + + pods, err := env.KubeClient.CoreV1().Pods("aggregator-app").List(ctx, metav1.ListOptions{ + LabelSelector: "app=aggregator-server", + }) + if err != nil { + return fmt.Errorf("failed to list pods: %w", err) + } + + if len(pods.Items) == 0 { + return fmt.Errorf("no aggregator-server pods found") + } + + podName := pods.Items[0].Name + + env.portForwardCmd = exec.Command("kubectl", "port-forward", + "-n", "aggregator-app", + "pod/"+podName, + "8080:5000") + env.portForwardCmd.Stdout = os.Stdout + env.portForwardCmd.Stderr = os.Stderr + + if err := env.portForwardCmd.Start(); err != nil { + return fmt.Errorf("failed to start port forward: %w", err) + } + + time.Sleep(2 * time.Second) + + fmt.Println("Port forward established on localhost:8080") + return nil +} + +func (env *TestEnvironment) Cleanup() error { + fmt.Println("Cleaning up test environment...") + + var errors []error + + if env.portForwardCmd != nil && env.portForwardCmd.Process != nil { + fmt.Println("Stopping port forward...") + if err := env.portForwardCmd.Process.Kill(); err != nil { + errors = append(errors, fmt.Errorf("failed to stop port forward: %w", err)) + } + } + + if env.umaServerProcess != nil && env.umaServerProcess.Process != nil { + fmt.Println("Stopping UMA server...") + if err := env.umaServerProcess.Process.Kill(); err != nil { + errors = 
append(errors, fmt.Errorf("failed to stop UMA server: %w", err))
+		}
+	}
+
+	for i := len(env.cleanupFuncs) - 1; i >= 0; i-- {
+		if err := env.cleanupFuncs[i](); err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	if len(errors) > 0 {
+		fmt.Printf("Cleanup completed with %d error(s)\n", len(errors))
+		return errors[0]
+	}
+
+	fmt.Println("Cleanup completed successfully")
+	return nil
+}
diff --git a/k8s/kind-config.yaml b/k8s/kind-config.yaml
index 4681378..276ea49 100644
--- a/k8s/kind-config.yaml
+++ b/k8s/kind-config.yaml
@@ -4,8 +4,8 @@ nodes:
 - role: control-plane
   extraPortMappings:
   - containerPort: 80
-    hostPort: 5000
+    hostPort: 80
     protocol: TCP
   - containerPort: 443
-    hostPort: 5443
+    hostPort: 443
     protocol: TCP
diff --git a/makefile b/makefile
index 66c571f..48467db 100644
--- a/makefile
+++ b/makefile
@@ -1,26 +1,39 @@
 .PHONY: kind-init kind-start kind-stop kind-dashboard \
 	containers-build containers-load containers-all \
 	kind-generate-key-pair \
-	enable-localhost disable-localhost \
-	kind-deploy \
-	kind-clean \
-	enable-wsl
+	kind-deploy kind-start-traefik kind-start-cleaner \
+	kind-clean clean kind-stop-traefik \
+	kind-undeploy stop \
+	enable-wsl \
+	docker-clean deploy \
+	integration-test
 
 # ------------------------
 # Kind targets
 # ------------------------
 
-# Initialize kind cluster, build/load containers, generate keys, deploy YAML manifests
-kind-init: kind-start containers-all kind-generate-key-pair kind-dashboard
+# Initialize kind cluster, build/load containers, generate keys, start cleaner
+kind-init: kind-start containers-all kind-generate-key-pair kind-start-cleaner
 
 # Start kind cluster
 kind-start:
 	@echo "🚀 Creating kind cluster..."
-	@if ! kind get clusters | grep -q "aggregator"; then \
-		kind create cluster --name aggregator --config k8s/kind-config.yaml; \
-	else \
+	@if kind get clusters 2>/dev/null | grep -q "aggregator"; then \
 		echo "Kind cluster 'aggregator' already exists."; \
+		if ! kubectl config get-contexts kind-aggregator >/dev/null 2>&1; then \
+			echo "⚠️ Context 'kind-aggregator' not found, deleting and recreating cluster..."; \
+			kind delete cluster --name aggregator; \
+			kind create cluster --name aggregator --config k8s/kind-config.yaml; \
+			echo "⏳ Waiting for cluster to be ready..."; \
+			kubectl wait --for=condition=Ready nodes --all --timeout=120s; \
+		fi; \
+	else \
+		kind create cluster --name aggregator --config k8s/kind-config.yaml; \
+		echo "⏳ Waiting for cluster to be ready..."; \
+		kubectl wait --for=condition=Ready nodes --all --timeout=120s; \
 	fi
+	@kubectl config use-context kind-aggregator
+	@echo "✅ Kind cluster is ready!"
 
 # Stop and delete kind cluster
 kind-stop:
@@ -31,6 +44,7 @@ kind-stop:
 # Get token: kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d
 kind-dashboard:
 	@echo "🚀 Configuring kubernetes dashboard"
+	@kubectl config use-context kind-aggregator
 	@if ! helm repo list | grep -q "kubernetes-dashboard"; then \
 		helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/; \
 	fi
@@ -46,6 +60,18 @@ kind-dashboard:
 	@kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d && echo ""
 	@kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard-kong-proxy 8443:443
 
+# Set up key pair for uma-proxy
+kind-generate-key-pair:
+	@echo "🔑 Generating key pair for uma-proxy..."
+ @kubectl config use-context kind-aggregator + @openssl genrsa -out uma-proxy.key 4096 + @openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA" + @echo "๐Ÿ—‘๏ธ Deleting existing Kubernetes secret for uma-proxy key pair if it exists..." + @kubectl delete secret uma-proxy-key-pair -n default --ignore-not-found + @echo "๐Ÿ” Creating Kubernetes secret for uma-proxy key pair..." + @kubectl create secret generic uma-proxy-key-pair --from-file=uma-proxy.crt=uma-proxy.crt --from-file=uma-proxy.key=uma-proxy.key -n default + @echo "๐Ÿ—‘๏ธ Cleaning up generated key pair files..." + @rm uma-proxy.crt uma-proxy.key # ------------------------ # Container targets @@ -59,6 +85,8 @@ containers-build: @if [ -n "$(CONTAINER)" ]; then \ dir="containers/$(CONTAINER)"; \ if [ -d "$$dir" ]; then \ + echo "๐Ÿ—‘๏ธ Removing old $(CONTAINER) images..."; \ + docker images "$(CONTAINER)" --format "{{.ID}}" | xargs -r docker rmi -f 2>/dev/null || true; \ echo "๐Ÿ“ฆ Building $(CONTAINER)..."; \ docker build "$$dir" -t "$(CONTAINER):latest"; \ else \ @@ -66,17 +94,26 @@ containers-build: exit 1; \ fi \ else \ + echo "๐Ÿ—‘๏ธ Removing old container images..."; \ + find containers -maxdepth 1 -mindepth 1 -type d -exec basename {} \; | \ + xargs -I {} sh -c 'docker images "{}" --format "{{.ID}}" | xargs -r docker rmi -f 2>/dev/null || true'; \ find containers -maxdepth 1 -mindepth 1 -type d | \ xargs -I {} -P $$(nproc) sh -c '\ name=$$(basename {}); \ echo "๐Ÿ“ฆ Building $$name..."; \ - docker build {} -t "$$name:latest" && echo "โœ… Built $$name" || echo "โŒ Failed to build $$name"; \ - '; \ + if docker build {} -t "$$name:latest"; then \ + echo "โœ… Built $$name"; \ + else \ + echo "โŒ Failed to build $$name"; \ + exit 1; \ + fi \ + ' && echo "โœ… All containers built successfully" || (echo "โŒ Build failed"; exit 1); \ fi # Load Docker images into kind containers-load: @echo "๐Ÿ“ค Loading container images into kind..." + @kubectl config use-context kind-aggregator 2>/dev/null || (echo "โŒ Kind cluster not ready"; exit 1) @if [ -n "$(CONTAINER)" ]; then \ name="$(CONTAINER)"; \ echo "๐Ÿ“ฅ Loading $$name into kind..."; \ @@ -86,18 +123,33 @@ containers-load: xargs -I {} -P 4 sh -c '\ name=$$(basename {}); \ echo "๐Ÿ“ฅ Loading $$name into kind..."; \ - kind load docker-image "$$name:latest" --name aggregator && echo "โœ… Loaded $$name" || echo "โŒ Failed to load $$name"; \ - '; \ + if kind load docker-image "$$name:latest" --name aggregator; then \ + echo "โœ… Loaded $$name"; \ + else \ + echo "โŒ Failed to load $$name"; \ + exit 1; \ + fi \ + ' && echo "โœ… All containers loaded successfully" || (echo "โŒ Loading failed"; exit 1); \ fi # Build and load all containers containers-all: containers-build containers-load +# Clean up Docker dangling and unused images +docker-clean: + @echo "๐Ÿงน Cleaning up Docker images..." + @echo "๐Ÿ—‘๏ธ Removing dangling images..." + @docker image prune -f + @echo "๐Ÿ—‘๏ธ Removing unused images..." + @docker image prune -a -f --filter "until=24h" + @echo "โœ… Docker cleanup complete" + # ------------------------ # Deploy YAML manifests with temporary key pair for uma-proxy # ------------------------ kind-start-traefik: @echo "๐Ÿ“„ Deploying Traefik Ingress Controller..." 
+ @kubectl config use-context kind-aggregator @helm repo add traefik https://traefik.github.io/charts @helm repo update @helm upgrade --install aggregator-traefik traefik/traefik \ @@ -115,10 +167,10 @@ kind-start-traefik: kind-start-cleaner: @echo "๐Ÿ“„ Deploying aggregator-cleaner controller..." + @kubectl config use-context kind-aggregator @kubectl apply -f k8s/ops/ns.yaml @kubectl apply -f k8s/ops/cleaner.yaml - - @echo "๐Ÿ“„ Waiting for aggregator-cleaner to be ready..." + @echo "โณ Waiting for aggregator-cleaner to be ready..." @kubectl wait --namespace aggregator-ops \ --for=condition=available deployment/aggregator-cleaner \ --timeout=60s || true @@ -126,17 +178,16 @@ kind-start-cleaner: @echo "โœ… Aggregator cleaner deployed" kind-deploy: + @echo "๐Ÿ“„ Deploying aggregator application..." + @kubectl config use-context kind-aggregator @echo "๐Ÿ“„ Applying aggregator namespace..." @kubectl apply -f k8s/app/ns.yaml - @echo "๐Ÿ“„ Applying traefik config..." @kubectl apply -f k8s/app/traefik-config.yaml - @echo "๐Ÿ“„ Creating secret for ingress-uma..." @kubectl -n aggregator-app create secret generic ingress-uma-key \ --from-file=private_key.pem=private_key.pem \ --dry-run=client -o yaml | kubectl apply -f - - @echo "๐Ÿ“„ Applying aggregator ConfigMap..." @kubectl apply -f k8s/app/config.yaml @@ -148,7 +199,6 @@ kind-deploy: @kubectl apply -f k8s/app/ingress-uma.yaml @echo "โณ Waiting for ingress-uma deployment to be ready..." @kubectl rollout status deployment ingress-uma -n aggregator-app --timeout=90s - @echo "โณ Waiting for ingress-uma via Ingress to be reachable..." @for i in {1..30}; do \ STATUS=$$(curl -s -o /dev/null -w "%{http_code}" http://aggregator.local/uma/.well-known/jwks.json || echo "000"); \ @@ -160,7 +210,6 @@ kind-deploy: sleep 2; \ fi; \ done - @echo "๐Ÿ“„ Applying aggregator deployment and service..." @kubectl apply -f k8s/app/aggregator.yaml @echo "โณ Waiting for aggregator deployment to be ready..." @@ -168,35 +217,68 @@ kind-deploy: @echo "โœ… Resources deployed to kind" +deploy: kind-start-traefik kind-deploy + @echo "โœ… Aggregator deployment complete" + # ------------------------ # Cleanup kind deployment # ------------------------ -kind-stop-cleaner: - @echo "๐Ÿงน Removing aggregator-cleaner controller..." - @kubectl delete -f k8s/ops/cleaner.yaml --ignore-not-found - @echo "โœ… Aggregator cleaner removed" +kind-undeploy: + @echo "๐Ÿงน Stopping aggregator deployment (keeping Traefik and cleaner running)..." + @if kind get clusters 2>/dev/null | grep -q "aggregator"; then \ + echo "๐Ÿ”ง Setting kubectl context..."; \ + kubectl config use-context kind-aggregator || true; \ + echo "๐Ÿงน Deleting aggregator namespace..."; \ + kubectl delete namespace aggregator-app --ignore-not-found || true; \ + else \ + echo "โ„น๏ธ Kind cluster 'aggregator' does not exist, skipping deployment cleanup"; \ + fi + @echo "๐Ÿงน Removing localhost entries..." + @sudo sed -i.bak '/aggregator\.local/d' /etc/hosts || true + @sudo sed -i.bak '/wsl\.local/d' /etc/hosts || true + @echo "โœ… Deployment stopped (Traefik and cleaner still running)" kind-stop-traefik: - @echo "๐Ÿงน Deleting Traefik Ingress Controller..." - # Delete the namespace (optional, removes all resources inside) - @kubectl delete namespace aggregator-traefik --ignore-not-found - @echo "โœ… Traefik Ingress Controller removed successfully." 
+	@if kind get clusters 2>/dev/null | grep -q "aggregator"; then \
+		echo "🧹 Deleting Traefik Ingress Controller..."; \
+		kubectl config use-context kind-aggregator || true; \
+		kubectl delete namespace aggregator-traefik --ignore-not-found || true; \
+		echo "✅ Traefik Ingress Controller removed successfully."; \
+	else \
+		echo "ℹ️ Kind cluster 'aggregator' does not exist, skipping Traefik cleanup"; \
+	fi
 
 kind-clean:
-	@echo "🧹 Deleting aggregator cluster-wide roles..."
-	@kubectl delete clusterrole aggregator-namespace-manager --ignore-not-found
-	@kubectl delete clusterrolebinding aggregator-namespace-manager-binding --ignore-not-found
-
-	@echo "🧹 Deleting aggregator namespace..."
-	@kubectl delete namespace aggregator-app --ignore-not-found
-
+	@echo "🧹 Cleaning up aggregator deployment..."
+	@if kind get clusters 2>/dev/null | grep -q "aggregator"; then \
+		echo "🔧 Setting kubectl context..."; \
+		kubectl config use-context kind-aggregator || true; \
+		echo "🧹 Deleting aggregator cluster-wide roles..."; \
+		kubectl delete clusterrole aggregator-namespace-manager --ignore-not-found || true; \
+		kubectl delete clusterrolebinding aggregator-namespace-manager-binding --ignore-not-found || true; \
+		kubectl delete clusterrole aggregator-cleaner-role --ignore-not-found || true; \
+		kubectl delete clusterrolebinding aggregator-cleaner-binding --ignore-not-found || true; \
+		echo "🧹 Deleting aggregator namespace..."; \
+		kubectl delete namespace aggregator-app --ignore-not-found || true; \
+		kubectl delete -f k8s/ops/cleaner.yaml --ignore-not-found || true; \
+		$(MAKE) kind-stop-traefik; \
+	else \
+		echo "ℹ️ Kind cluster 'aggregator' does not exist, skipping Kubernetes cleanup"; \
+	fi
 	@echo "🧹 Removing localhost entries..."
-	@sudo sed -i.bak '/aggregator\.local/d' /etc/hosts
-	@sudo sed -i.bak '/wsl\.local/d' /etc/hosts
-
+	@sudo sed -i.bak '/aggregator\.local/d' /etc/hosts || true
+	@sudo sed -i.bak '/wsl\.local/d' /etc/hosts || true
 	@echo "✅ Cleanup complete"
 
+# Clean everything and delete the entire kind cluster
+clean: kind-clean kind-stop docker-clean
+	@echo "✅ Complete cleanup finished - cluster deleted"
+
+# Stop deployment and Traefik
+stop: kind-undeploy kind-stop-traefik
+	@echo "✅ All services stopped (cluster and cleaner still running)"
+
 # -------------------------
 # wsl support
 # -------------------------
@@ -233,3 +315,9 @@ enable-wsl:
 	@echo "✅ Done! 'wsl.local' now resolves to $(WSL_IP)"
 
+# ------------------------
+# Integration Tests
+# ------------------------
+integration-test:
+	@echo "🧪 Running integration tests..."
+	@cd integration-test && go mod download && go test -v -timeout 20m ./...
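
The readiness loop in `kind-deploy` above, which polls `http://aggregator.local/uma/.well-known/jwks.json` until it answers, is the same kind of gate the Go tests need before making assertions against the aggregator. The sketch below is illustrative only: the endpoint comes from the makefile, while `waitForEndpoint` is a hypothetical helper and not part of this patch.

```go
// Sketch only: a Go version of the makefile's curl readiness loop.
// The /uma/.well-known/jwks.json endpoint is taken from kind-deploy;
// waitForEndpoint is a hypothetical helper, not part of this patch.
package utils

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// waitForEndpoint polls url every two seconds until it answers 200 OK
// or the context expires.
func waitForEndpoint(ctx context.Context, url string) error {
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out waiting for %s: %w", url, ctx.Err())
		case <-ticker.C:
			resp, err := http.Get(url)
			if err != nil {
				continue // not reachable yet, try again on the next tick
			}
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
	}
}
```

A test could then gate on something like `waitForEndpoint(ctx, "http://aggregator.local/uma/.well-known/jwks.json")` with a `context.WithTimeout` of about a minute, matching the shell loop's budget of 30 iterations at two-second intervals.
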
From e7180c493f23af0ecee5a5cda05c5335993e27d9 Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 15:11:40 +0100 Subject: [PATCH 2/7] add wsl to windows --- .github/workflows/integration-tests.yml | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 5c42726..987090e 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -31,8 +31,11 @@ jobs: - name: Set up Docker (Windows) if: runner.os == 'Windows' run: | - # Docker Desktop should already be available on windows-latest + # Ensure Docker Desktop is using WSL2 backend for Linux containers docker version + docker info | Select-String -Pattern "OSType" + # Verify we can build Linux containers + docker pull alpine:latest - name: Install Kind (Linux) if: runner.os == 'Linux' @@ -90,12 +93,16 @@ jobs: - name: Build Docker images (Windows) if: runner.os == 'Windows' run: | - # Build containers sequentially on Windows + # Build Linux containers on Windows using WSL2 backend Get-ChildItem -Path containers -Directory | ForEach-Object { $name = $_.Name Write-Host "Building $name..." - docker build "containers/$name" -t "${name}:latest" - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + docker build --platform linux/amd64 "containers/$name" -t "${name}:latest" + if ($LASTEXITCODE -ne 0) { + Write-Host "Failed to build $name" + exit $LASTEXITCODE + } + Write-Host "Successfully built $name" } shell: powershell timeout-minutes: 30 From 5ae40a40b59ce2caed5d5f8b2fe54c1ea1979287 Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 16:00:07 +0100 Subject: [PATCH 3/7] tests now use the aggregator cluster --- .github/workflows/integration-tests.yml | 50 +++++- README.md | 22 ++- integration-test/kind-test-config.yaml | 5 - integration-test/main_test.go | 10 +- integration-test/server_metadata_test.go | 1 - integration-test/utils/test_environment.go | 197 +++------------------ 6 files changed, 92 insertions(+), 193 deletions(-) delete mode 100644 integration-test/kind-test-config.yaml diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 987090e..5617a71 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -123,6 +123,8 @@ jobs: fi done shell: bash + shell: bash + shell: bash timeout-minutes: 20 - name: Load images into Kind (Windows) @@ -132,21 +134,57 @@ jobs: $name = $_.Name Write-Host "Loading $name into kind..." 
kind load docker-image "${name}:latest" --name aggregator-test - } + kind load docker-image "${name}:latest" --name aggregator shell: powershell timeout-minutes: 20 - name: Wait for cluster to be ready - run: | + - name: Generate key pair for UMA proxy kubectl config use-context kind-aggregator-test - kubectl wait --for=condition=Ready nodes --all --timeout=120s + kubectl config use-context kind-aggregator shell: bash - + openssl genrsa -out uma-proxy.key 4096 + openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA" + kubectl delete secret uma-proxy-key-pair -n default --ignore-not-found + kubectl create secret generic uma-proxy-key-pair --from-file=uma-proxy.crt=uma-proxy.crt --from-file=uma-proxy.key=uma-proxy.key -n default + rm uma-proxy.crt uma-proxy.key + + - name: Deploy aggregator-cleaner + run: | + kubectl apply -f k8s/ops/ns.yaml + kubectl apply -f k8s/ops/cleaner.yaml + kubectl wait --namespace aggregator-ops --for=condition=available deployment/aggregator-cleaner --timeout=60s || true + + - name: Deploy Traefik + run: | + helm repo add traefik https://traefik.github.io/charts + helm repo update + kind export logs ./kind-logs --name aggregator || true + --namespace aggregator-traefik \ + --create-namespace \ + --set ingressClass.enabled=true \ + --set ingressClass.name=aggregator-traefik \ + --set ports.web.hostPort=80 \ + --set ports.websecure.hostPort=443 \ + --set service.type=ClusterIP \ + --set providers.kubernetesCRD.allowCrossNamespace=true \ + kind export logs ./kind-logs --name aggregator + kubectl rollout status deployment aggregator-traefik -n aggregator-traefik --timeout=180s + + - name: Deploy aggregator + run: | + kubectl apply -f k8s/app/ns.yaml + kubectl apply -f k8s/app/config.yaml + kubectl apply -f k8s/app/aggregator.yaml + kubectl rollout status deployment aggregator-server -n aggregator-app --timeout=120s + + - name: Add /etc/hosts entry + run: | + echo "127.0.0.1 aggregator.local" | sudo tee -a /etc/hosts - name: Run integration tests run: | cd integration-test - go test -v -timeout 30m ./... - shell: bash + kind delete cluster --name aggregator || true timeout-minutes: 35 - name: Collect logs on failure (Linux) diff --git a/README.md b/README.md index 097fb37..6278a77 100644 --- a/README.md +++ b/README.md @@ -160,11 +160,31 @@ Automated tests run on GitHub Actions for Linux and Windows on every push and pu ### Run Locally +Integration tests use the existing Kind cluster and deployment created by `make kind-init` and `make deploy`. + ```bash +# First-time setup +make kind-init +make deploy + +# Run tests (uses existing cluster) make integration-test ``` -Tests use port forwarding to `localhost:8080` to avoid requiring root privileges. +The tests will: +- Verify the existing `aggregator` cluster is running +- Check that the aggregator is deployed +- Run all integration tests against `http://aggregator.local` +- Leave the cluster running after tests complete + +### CI/CD + +The GitHub Actions workflow automatically: +1. Creates a test cluster +2. Builds and loads containers +3. Deploys Traefik and the aggregator +4. Runs the full test suite +5. 
Cleans up the test cluster ## Troubleshooting diff --git a/integration-test/kind-test-config.yaml b/integration-test/kind-test-config.yaml deleted file mode 100644 index d64d263..0000000 --- a/integration-test/kind-test-config.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -nodes: -- role: control-plane - diff --git a/integration-test/main_test.go b/integration-test/main_test.go index 33ef0cc..cf404d0 100644 --- a/integration-test/main_test.go +++ b/integration-test/main_test.go @@ -12,7 +12,7 @@ import ( var testEnv *utils.TestEnvironment func TestMain(m *testing.M) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() var err error @@ -21,14 +21,6 @@ func TestMain(m *testing.M) { panic("Failed to setup test environment: " + err.Error()) } - if err := testEnv.WaitForAggregatorReady(ctx); err != nil { - panic("Aggregator not ready: " + err.Error()) - } - - if err := testEnv.SetupPortForward(ctx); err != nil { - panic("Failed to setup port forward: " + err.Error()) - } - code := m.Run() if err := testEnv.Cleanup(); err != nil { diff --git a/integration-test/server_metadata_test.go b/integration-test/server_metadata_test.go index b420526..35bb7e8 100644 --- a/integration-test/server_metadata_test.go +++ b/integration-test/server_metadata_test.go @@ -80,7 +80,6 @@ func TestClientIdentifierDocument(t *testing.T) { t.Fatal("client_identifier field missing or not a string") } - // 2. GET the client identifier document clientResp, err := http.Get(clientIdentifierURL) if err != nil { t.Fatalf("Failed to get client identifier document: %v", err) diff --git a/integration-test/utils/test_environment.go b/integration-test/utils/test_environment.go index 61fcf81..905664e 100644 --- a/integration-test/utils/test_environment.go +++ b/integration-test/utils/test_environment.go @@ -3,14 +3,11 @@ package utils import ( "context" "fmt" - "os" - "os/exec" - "strings" - "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" + "os/exec" + "strings" ) type TestEnvironment struct { @@ -45,60 +42,29 @@ type Actor struct { func SetupTestEnvironment(ctx context.Context) (*TestEnvironment, error) { env := &TestEnvironment{ - ClusterName: "aggregator-test", - AggregatorURL: "http://localhost:8080", + ClusterName: "aggregator", + AggregatorURL: "http://aggregator.local", } - if err := env.setupKubernetesCluster(ctx); err != nil { - return nil, fmt.Errorf("failed to setup kubernetes cluster: %w", err) + // Check if cluster exists + cmd := exec.CommandContext(ctx, "kind", "get", "clusters") + output, err := cmd.CombinedOutput() + if err != nil || !strings.Contains(string(output), env.ClusterName) { + return nil, fmt.Errorf("kind cluster '%s' not found. Please run 'make kind-init' first", env.ClusterName) } if err := env.setupKubeClient(); err != nil { return nil, fmt.Errorf("failed to setup kubernetes client: %w", err) } - if err := env.buildAndLoadContainers(ctx); err != nil { - return nil, fmt.Errorf("failed to build and load containers: %w", err) - } - - if err := env.deployAggregator(ctx); err != nil { - return nil, fmt.Errorf("failed to deploy aggregator: %w", err) + // Check if aggregator is deployed + if err := env.checkAggregatorDeployed(ctx); err != nil { + return nil, fmt.Errorf("aggregator not deployed: %w. 
Please run 'make deploy' first", err) } return env, nil } -func (env *TestEnvironment) setupKubernetesCluster(ctx context.Context) error { - fmt.Println("Creating Kind cluster...") - - cmd := exec.CommandContext(ctx, "kind", "get", "clusters") - output, err := cmd.CombinedOutput() - if err == nil && strings.Contains(string(output), env.ClusterName) { - fmt.Println("Cluster already exists, deleting...") - deleteCmd := exec.CommandContext(ctx, "kind", "delete", "cluster", "--name", env.ClusterName) - if err := deleteCmd.Run(); err != nil { - return fmt.Errorf("failed to delete existing cluster: %w", err) - } - } - - createCmd := exec.CommandContext(ctx, "kind", "create", "cluster", - "--name", env.ClusterName, - "--config", "kind-test-config.yaml") - createCmd.Stdout = os.Stdout - createCmd.Stderr = os.Stderr - - if err := createCmd.Run(); err != nil { - return fmt.Errorf("failed to create kind cluster: %w", err) - } - - env.cleanupFuncs = append(env.cleanupFuncs, func() error { - cmd := exec.Command("kind", "delete", "cluster", "--name", env.ClusterName) - return cmd.Run() - }) - - return nil -} - func (env *TestEnvironment) setupKubeClient() error { kubeconfig := clientcmd.NewDefaultClientConfigLoadingRules().GetDefaultFilename() config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) @@ -114,125 +80,24 @@ func (env *TestEnvironment) setupKubeClient() error { return nil } -func (env *TestEnvironment) buildAndLoadContainers(ctx context.Context) error { - fmt.Println("Building containers...") - - buildCmd := exec.CommandContext(ctx, "make", "-C", "..", "containers-build") - buildCmd.Stdout = os.Stdout - buildCmd.Stderr = os.Stderr - - if err := buildCmd.Run(); err != nil { - return fmt.Errorf("failed to build containers: %w", err) - } - - fmt.Println("Loading containers into Kind...") - - containers := []string{"aggregator-server", "aggregator", "egress-uma", "ingress-uma", "file-server"} - for _, container := range containers { - loadCmd := exec.CommandContext(ctx, "kind", "load", "docker-image", - container+":latest", "--name", env.ClusterName) - loadCmd.Stdout = os.Stdout - loadCmd.Stderr = os.Stderr - - if err := loadCmd.Run(); err != nil { - return fmt.Errorf("failed to load container %s: %w", container, err) - } - } - - return nil -} - -func (env *TestEnvironment) deployAggregator(ctx context.Context) error { - fmt.Println("Deploying aggregator...") - - yamlFiles := []string{ - "../k8s/app/ns.yaml", - "../k8s/app/config.yaml", - } - - for _, yamlFile := range yamlFiles { - applyCmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", yamlFile) - applyCmd.Stdout = os.Stdout - applyCmd.Stderr = os.Stderr - - if err := applyCmd.Run(); err != nil { - return fmt.Errorf("failed to apply %s: %w", yamlFile, err) - } - } - - applyCmd := exec.CommandContext(ctx, "sh", "-c", - "kubectl apply -f ../k8s/app/aggregator.yaml 2>&1 | grep -v 'IngressRoute' || true") - applyCmd.Stdout = os.Stdout - applyCmd.Stderr = os.Stderr - - if err := applyCmd.Run(); err != nil { - return fmt.Errorf("failed to apply aggregator.yaml: %w", err) - } - - return nil -} - -func (env *TestEnvironment) WaitForAggregatorReady(ctx context.Context) error { - timeout := time.After(2 * time.Minute) - ticker := time.NewTicker(5 * time.Second) - defer ticker.Stop() - - for { - select { - case <-timeout: - return fmt.Errorf("timeout waiting for aggregator to be ready") - case <-ticker.C: - pods, err := env.KubeClient.CoreV1().Pods("aggregator-app").List(ctx, metav1.ListOptions{ - LabelSelector: "app=aggregator-server", - }) 
- if err != nil { - continue - } - - if len(pods.Items) > 0 { - pod := pods.Items[0] - if pod.Status.Phase == "Running" { - for _, condition := range pod.Status.Conditions { - if condition.Type == "Ready" && condition.Status == "True" { - return nil - } - } - } - } - } +func (env *TestEnvironment) checkAggregatorDeployed(ctx context.Context) error { + // Check if aggregator-app namespace exists + _, err := env.KubeClient.CoreV1().Namespaces().Get(ctx, "aggregator-app", metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("aggregator-app namespace not found: %w", err) } -} -func (env *TestEnvironment) SetupPortForward(ctx context.Context) error { - fmt.Println("Setting up port forward...") - - pods, err := env.KubeClient.CoreV1().Pods("aggregator-app").List(ctx, metav1.ListOptions{ - LabelSelector: "app=aggregator-server", - }) + // Check if aggregator-server deployment exists and is ready + deployment, err := env.KubeClient.AppsV1().Deployments("aggregator-app").Get(ctx, "aggregator-server", metav1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to list pods: %w", err) + return fmt.Errorf("aggregator-server deployment not found: %w", err) } - if len(pods.Items) == 0 { - return fmt.Errorf("no aggregator-server pods found") + if deployment.Status.ReadyReplicas == 0 { + return fmt.Errorf("aggregator-server has no ready replicas") } - podName := pods.Items[0].Name - - env.portForwardCmd = exec.Command("kubectl", "port-forward", - "-n", "aggregator-app", - "pod/"+podName, - "8080:5000") - env.portForwardCmd.Stdout = os.Stdout - env.portForwardCmd.Stderr = os.Stderr - - if err := env.portForwardCmd.Start(); err != nil { - return fmt.Errorf("failed to start port forward: %w", err) - } - - time.Sleep(2 * time.Second) - - fmt.Println("Port forward established on localhost:8080") + fmt.Println("Found existing aggregator deployment") return nil } @@ -241,13 +106,6 @@ func (env *TestEnvironment) Cleanup() error { var errors []error - if env.portForwardCmd != nil && env.portForwardCmd.Process != nil { - fmt.Println("Stopping port forward...") - if err := env.portForwardCmd.Process.Kill(); err != nil { - errors = append(errors, fmt.Errorf("failed to stop port forward: %w", err)) - } - } - if env.umaServerProcess != nil && env.umaServerProcess.Process != nil { fmt.Println("Stopping UMA server...") if err := env.umaServerProcess.Process.Kill(); err != nil { @@ -255,17 +113,14 @@ func (env *TestEnvironment) Cleanup() error { } } - for i := len(env.cleanupFuncs) - 1; i >= 0; i-- { - if err := env.cleanupFuncs[i](); err != nil { - errors = append(errors, err) - } - } + // Note: We don't delete the cluster as it's shared with the main deployment + // The user should run 'make clean' to remove everything if len(errors) > 0 { fmt.Printf("Cleanup completed with %d error(s)\n", len(errors)) return errors[0] } - fmt.Println("Cleanup completed successfully") + fmt.Println("Test cleanup complete (cluster left running)") return nil } From b0410edd9fb771ca548db4ac0bc6a6e859b2a90a Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 16:03:22 +0100 Subject: [PATCH 4/7] Fix github actions workflow file --- .github/workflows/integration-tests.yml | 42 ++++++++++++------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 5617a71..d3b9164 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -31,11 +31,10 @@ jobs: - name: Set up Docker 
(Windows) if: runner.os == 'Windows' run: | - # Ensure Docker Desktop is using WSL2 backend for Linux containers docker version docker info | Select-String -Pattern "OSType" - # Verify we can build Linux containers docker pull alpine:latest + shell: powershell - name: Install Kind (Linux) if: runner.os == 'Linux' @@ -82,7 +81,6 @@ jobs: kind version kubectl version --client helm version - shell: bash - name: Build Docker images (Linux) if: runner.os == 'Linux' @@ -93,7 +91,6 @@ jobs: - name: Build Docker images (Windows) if: runner.os == 'Windows' run: | - # Build Linux containers on Windows using WSL2 backend Get-ChildItem -Path containers -Directory | ForEach-Object { $name = $_.Name Write-Host "Building $name..." @@ -109,7 +106,7 @@ jobs: - name: Create Kind cluster run: | - kind create cluster --name aggregator-test --config integration-test/kind-test-config.yaml --wait 120s + kind create cluster --name aggregator --config k8s/kind-config.yaml --wait 120s timeout-minutes: 10 - name: Load images into Kind (Linux) @@ -119,12 +116,9 @@ jobs: if [ -d "$dir" ]; then name=$(basename "$dir") echo "Loading $name into kind..." - kind load docker-image "$name:latest" --name aggregator-test + kind load docker-image "$name:latest" --name aggregator fi done - shell: bash - shell: bash - shell: bash timeout-minutes: 20 - name: Load images into Kind (Windows) @@ -133,16 +127,15 @@ jobs: Get-ChildItem -Path containers -Directory | ForEach-Object { $name = $_.Name Write-Host "Loading $name into kind..." - kind load docker-image "${name}:latest" --name aggregator-test kind load docker-image "${name}:latest" --name aggregator + } shell: powershell timeout-minutes: 20 - - name: Wait for cluster to be ready - name: Generate key pair for UMA proxy - kubectl config use-context kind-aggregator-test + run: | kubectl config use-context kind-aggregator - shell: bash + kubectl wait --for=condition=Ready nodes --all --timeout=120s openssl genrsa -out uma-proxy.key 4096 openssl req -x509 -new -nodes -key uma-proxy.key -sha256 -days 3650 -out uma-proxy.crt -subj "/CN=Aggregator MITM CA" kubectl delete secret uma-proxy-key-pair -n default --ignore-not-found @@ -159,7 +152,7 @@ jobs: run: | helm repo add traefik https://traefik.github.io/charts helm repo update - kind export logs ./kind-logs --name aggregator || true + helm upgrade --install aggregator-traefik traefik/traefik \ --namespace aggregator-traefik \ --create-namespace \ --set ingressClass.enabled=true \ @@ -168,7 +161,7 @@ jobs: --set ports.websecure.hostPort=443 \ --set service.type=ClusterIP \ --set providers.kubernetesCRD.allowCrossNamespace=true \ - kind export logs ./kind-logs --name aggregator + --wait --timeout=3m kubectl rollout status deployment aggregator-traefik -n aggregator-traefik --timeout=180s - name: Deploy aggregator @@ -178,13 +171,21 @@ jobs: kubectl apply -f k8s/app/aggregator.yaml kubectl rollout status deployment aggregator-server -n aggregator-app --timeout=120s - - name: Add /etc/hosts entry + - name: Add /etc/hosts entry (Linux) + if: runner.os == 'Linux' run: | echo "127.0.0.1 aggregator.local" | sudo tee -a /etc/hosts + + - name: Add hosts entry (Windows) + if: runner.os == 'Windows' + run: | + Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "127.0.0.1 aggregator.local" + shell: powershell + - name: Run integration tests run: | cd integration-test - kind delete cluster --name aggregator || true + go test -v -timeout 30m ./... 
timeout-minutes: 35 - name: Collect logs on failure (Linux) @@ -195,7 +196,7 @@ jobs: echo "=== Docker Containers ===" docker ps -a echo "=== Kind Logs ===" - kind export logs ./kind-logs --name aggregator-test || true + kind export logs ./kind-logs --name aggregator || true - name: Collect logs on failure (Windows) if: failure() && runner.os == 'Windows' @@ -205,7 +206,7 @@ jobs: Write-Host "=== Docker Containers ===" docker ps -a Write-Host "=== Kind Logs ===" - kind export logs ./kind-logs --name aggregator-test + kind export logs ./kind-logs --name aggregator shell: powershell continue-on-error: true @@ -222,8 +223,7 @@ jobs: - name: Cleanup if: always() run: | - kind delete cluster --name aggregator-test || true - shell: bash + kind delete cluster --name aggregator || true notify: name: Notify Results From 328c10cbcf74892e913a88092181662c80fa83ba Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 17:24:22 +0100 Subject: [PATCH 5/7] docker fix for windows ci/cd --- .github/workflows/integration-tests.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index d3b9164..a3b3166 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -31,8 +31,18 @@ jobs: - name: Set up Docker (Windows) if: runner.os == 'Windows' run: | + Write-Host "Checking Docker version..." docker version + + Write-Host "Switching to Linux containers..." + & "C:\Program Files\Docker\Docker\DockerCli.exe" -SwitchLinuxEngine + + Start-Sleep -Seconds 10 + + Write-Host "Verifying Docker is in Linux mode..." docker info | Select-String -Pattern "OSType" + + Write-Host "Testing Linux container pull..." docker pull alpine:latest shell: powershell From e7d2a09c3236a26cf482e5b052d0e272bcb6061c Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 17:27:19 +0100 Subject: [PATCH 6/7] attempt 2 to fix ci/cd for windows --- .github/workflows/integration-tests.yml | 41 ++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index a3b3166..97fc047 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -34,13 +34,44 @@ jobs: Write-Host "Checking Docker version..." docker version - Write-Host "Switching to Linux containers..." - & "C:\Program Files\Docker\Docker\DockerCli.exe" -SwitchLinuxEngine + Write-Host "Checking Docker daemon info..." + docker info - Start-Sleep -Seconds 10 + $osType = docker info --format '{{.OSType}}' + Write-Host "Current OSType: $osType" - Write-Host "Verifying Docker is in Linux mode..." - docker info | Select-String -Pattern "OSType" + if ($osType -ne "linux") { + Write-Host "Docker is in Windows mode, switching to Linux mode..." + + # Stop Docker service + Stop-Service -Name docker -Force -ErrorAction SilentlyContinue + + # Wait for service to stop + Start-Sleep -Seconds 5 + + # Start Docker service + Start-Service -Name docker + + # Wait for Docker to be ready + $maxAttempts = 30 + $attempt = 0 + while ($attempt -lt $maxAttempts) { + try { + docker info | Out-Null + Write-Host "Docker is ready" + break + } catch { + $attempt++ + Write-Host "Waiting for Docker to be ready... 
(attempt $attempt/$maxAttempts)" + Start-Sleep -Seconds 2 + } + } + + $osType = docker info --format '{{.OSType}}' + Write-Host "OSType after restart: $osType" + } else { + Write-Host "Docker is already in Linux mode" + } Write-Host "Testing Linux container pull..." docker pull alpine:latest From 0cbf9de4ea2eb7ce3871f63a409440fe69be5b68 Mon Sep 17 00:00:00 2001 From: maartenvandenbrande Date: Sun, 4 Jan 2026 17:44:43 +0100 Subject: [PATCH 7/7] disable windows + ci/cd optimizations --- .github/workflows/integration-tests.yml | 216 ++++++------------------ 1 file changed, 48 insertions(+), 168 deletions(-) diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 97fc047..c30c81f 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -1,178 +1,87 @@ name: Integration Tests - on: push: pull_request: workflow_dispatch: - jobs: integration-tests: - name: Integration Tests - ${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest] - + name: Integration Tests + runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 - - name: Set up Go uses: actions/setup-go@v5 with: go-version: '1.21' + cache: true cache-dependency-path: integration-test/go.sum - - - name: Set up Docker (Linux) - if: runner.os == 'Linux' + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('integration-test/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + - name: Set up Docker uses: docker/setup-buildx-action@v3 - - - name: Set up Docker (Windows) - if: runner.os == 'Windows' - run: | - Write-Host "Checking Docker version..." - docker version - - Write-Host "Checking Docker daemon info..." - docker info - - $osType = docker info --format '{{.OSType}}' - Write-Host "Current OSType: $osType" - - if ($osType -ne "linux") { - Write-Host "Docker is in Windows mode, switching to Linux mode..." - - # Stop Docker service - Stop-Service -Name docker -Force -ErrorAction SilentlyContinue - - # Wait for service to stop - Start-Sleep -Seconds 5 - - # Start Docker service - Start-Service -Name docker - - # Wait for Docker to be ready - $maxAttempts = 30 - $attempt = 0 - while ($attempt -lt $maxAttempts) { - try { - docker info | Out-Null - Write-Host "Docker is ready" - break - } catch { - $attempt++ - Write-Host "Waiting for Docker to be ready... (attempt $attempt/$maxAttempts)" - Start-Sleep -Seconds 2 - } - } - - $osType = docker info --format '{{.OSType}}' - Write-Host "OSType after restart: $osType" - } else { - Write-Host "Docker is already in Linux mode" - } - - Write-Host "Testing Linux container pull..." 
- docker pull alpine:latest - shell: powershell - - - name: Install Kind (Linux) - if: runner.os == 'Linux' + with: + driver-opts: network=host + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + - name: Install Kind run: | curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 chmod +x ./kind sudo mv ./kind /usr/local/bin/kind - - - name: Install Kind (Windows) - if: runner.os == 'Windows' - run: | - curl.exe -Lo kind-windows-amd64.exe https://kind.sigs.k8s.io/dl/v0.20.0/kind-windows-amd64 - Move-Item .\kind-windows-amd64.exe C:\Windows\System32\kind.exe - shell: powershell - - - name: Install kubectl (Linux) - if: runner.os == 'Linux' + - name: Install kubectl run: | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x kubectl sudo mv kubectl /usr/local/bin/ - - - name: Install kubectl (Windows) - if: runner.os == 'Windows' - run: | - curl.exe -LO "https://dl.k8s.io/release/v1.28.0/bin/windows/amd64/kubectl.exe" - Move-Item .\kubectl.exe C:\Windows\System32\kubectl.exe - shell: powershell - - - name: Install Helm (Linux) - if: runner.os == 'Linux' + - name: Install Helm run: | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash - - - name: Install Helm (Windows) - if: runner.os == 'Windows' - run: | - choco install kubernetes-helm -y - shell: powershell - - name: Verify installations run: | docker version kind version kubectl version --client helm version - - - name: Build Docker images (Linux) - if: runner.os == 'Linux' + - name: Build Docker images run: | + export DOCKER_BUILDKIT=1 + export BUILDKIT_PROGRESS=plain make containers-build timeout-minutes: 30 - - - name: Build Docker images (Windows) - if: runner.os == 'Windows' - run: | - Get-ChildItem -Path containers -Directory | ForEach-Object { - $name = $_.Name - Write-Host "Building $name..." - docker build --platform linux/amd64 "containers/$name" -t "${name}:latest" - if ($LASTEXITCODE -ne 0) { - Write-Host "Failed to build $name" - exit $LASTEXITCODE - } - Write-Host "Successfully built $name" - } - shell: powershell - timeout-minutes: 30 - + env: + DOCKER_BUILDKIT: 1 - name: Create Kind cluster run: | kind create cluster --name aggregator --config k8s/kind-config.yaml --wait 120s timeout-minutes: 10 - - - name: Load images into Kind (Linux) - if: runner.os == 'Linux' - run: | - for dir in containers/*; do - if [ -d "$dir" ]; then - name=$(basename "$dir") - echo "Loading $name into kind..." - kind load docker-image "$name:latest" --name aggregator + - name: Load images into Kind + run: | + echo "Loading images in parallel..." + find containers -maxdepth 1 -mindepth 1 -type d | \ + xargs -I {} -P 4 sh -c ' + name=$(basename {}) + echo "๐Ÿ“ฅ Loading $name..." + if kind load docker-image "$name:latest" --name aggregator; then + echo "โœ… Loaded $name" + else + echo "โŒ Failed to load $name" + exit 1 fi - done - timeout-minutes: 20 - - - name: Load images into Kind (Windows) - if: runner.os == 'Windows' - run: | - Get-ChildItem -Path containers -Directory | ForEach-Object { - $name = $_.Name - Write-Host "Loading $name into kind..." 
- kind load docker-image "${name}:latest" --name aggregator - } - shell: powershell - timeout-minutes: 20 - + ' + timeout-minutes: 10 - name: Generate key pair for UMA proxy run: | kubectl config use-context kind-aggregator @@ -182,13 +91,11 @@ jobs: kubectl delete secret uma-proxy-key-pair -n default --ignore-not-found kubectl create secret generic uma-proxy-key-pair --from-file=uma-proxy.crt=uma-proxy.crt --from-file=uma-proxy.key=uma-proxy.key -n default rm uma-proxy.crt uma-proxy.key - - name: Deploy aggregator-cleaner run: | kubectl apply -f k8s/ops/ns.yaml kubectl apply -f k8s/ops/cleaner.yaml kubectl wait --namespace aggregator-ops --for=condition=available deployment/aggregator-cleaner --timeout=60s || true - - name: Deploy Traefik run: | helm repo add traefik https://traefik.github.io/charts @@ -204,33 +111,22 @@ jobs: --set providers.kubernetesCRD.allowCrossNamespace=true \ --wait --timeout=3m kubectl rollout status deployment aggregator-traefik -n aggregator-traefik --timeout=180s - - name: Deploy aggregator run: | kubectl apply -f k8s/app/ns.yaml kubectl apply -f k8s/app/config.yaml kubectl apply -f k8s/app/aggregator.yaml kubectl rollout status deployment aggregator-server -n aggregator-app --timeout=120s - - - name: Add /etc/hosts entry (Linux) - if: runner.os == 'Linux' + - name: Add /etc/hosts entry run: | echo "127.0.0.1 aggregator.local" | sudo tee -a /etc/hosts - - - name: Add hosts entry (Windows) - if: runner.os == 'Windows' - run: | - Add-Content -Path C:\Windows\System32\drivers\etc\hosts -Value "127.0.0.1 aggregator.local" - shell: powershell - - name: Run integration tests run: | cd integration-test go test -v -timeout 30m ./... timeout-minutes: 35 - - - name: Collect logs on failure (Linux) - if: failure() && runner.os == 'Linux' + - name: Collect logs on failure + if: failure() run: | echo "=== Cluster Info ===" kubectl cluster-info dump --output-directory=./cluster-logs --namespaces aggregator-app,aggregator-ops 2>&1 || true @@ -238,34 +134,19 @@ jobs: docker ps -a echo "=== Kind Logs ===" kind export logs ./kind-logs --name aggregator || true - - - name: Collect logs on failure (Windows) - if: failure() && runner.os == 'Windows' - run: | - Write-Host "=== Cluster Info ===" - kubectl cluster-info dump --output-directory=./cluster-logs --namespaces aggregator-app,aggregator-ops - Write-Host "=== Docker Containers ===" - docker ps -a - Write-Host "=== Kind Logs ===" - kind export logs ./kind-logs --name aggregator - shell: powershell - continue-on-error: true - - name: Upload logs on failure if: failure() uses: actions/upload-artifact@v4 with: - name: test-logs-${{ matrix.os }} + name: test-logs path: | cluster-logs/ kind-logs/ retention-days: 7 - - name: Cleanup if: always() run: | kind delete cluster --name aggregator || true - notify: name: Notify Results needs: integration-tests @@ -280,4 +161,3 @@ jobs: echo "โŒ Integration tests failed" exit 1 fi -
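
The parallel image-loading step in this final workflow, `xargs -P 4` over the `containers/` directories, has a straightforward Go analogue should the test utilities ever need to load images themselves. The sketch below is hypothetical helper code, not part of any patch: the `kind load docker-image` invocation, the `<name>:latest` tag, and the four-way fan-out mirror the workflow, while `loadImages` itself is an assumption.

```go
// Sketch only: the workflow's parallel `kind load docker-image` step in Go.
// loadImages is a hypothetical helper; the CLI invocation and :latest tag
// mirror the workflow above.
package utils

import (
	"fmt"
	"os/exec"
	"sync"
)

// loadImages loads each image into the named kind cluster, running at
// most four `kind load` processes at a time (like the workflow's -P 4).
func loadImages(cluster string, images []string) error {
	sem := make(chan struct{}, 4) // buffered channel as a 4-slot semaphore
	errCh := make(chan error, len(images))
	var wg sync.WaitGroup

	for _, img := range images {
		wg.Add(1)
		go func(img string) {
			defer wg.Done()
			sem <- struct{}{} // acquire a slot
			defer func() { <-sem }()

			cmd := exec.Command("kind", "load", "docker-image",
				img+":latest", "--name", cluster)
			if out, err := cmd.CombinedOutput(); err != nil {
				errCh <- fmt.Errorf("failed to load %s: %v\n%s", img, err, out)
			}
		}(img)
	}

	wg.Wait()
	close(errCh)

	// Report the first failure, like the shell script's exit 1.
	for err := range errCh {
		return err
	}
	return nil
}
```

Invoked as, say, `loadImages("aggregator", []string{"aggregator-server", "ingress-uma"})`; the semaphore keeps at most four loads in flight, mirroring `-P 4`.
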